import numpy as np

# extract_images, extract_labels, train_validation_split, MultiLayerNN,
# compute_accuracy and plot_confusion_matrices are assumed to be defined
# elsewhere in this module.

if __name__ == "__main__":
    # Load and flatten the MNIST training set (reconstructed to mirror the
    # test-set loading below; the snippet originally started mid-script)
    images = extract_images('train-images-idx3-ubyte.gz')
    images = np.reshape(images, (-1, 28 * 28)) / 255
    labels = extract_labels('train-labels-idx1-ubyte.gz')

    training_set_size = 55_000
    training_images, training_labels, valid_images, valid_labels \
        = train_validation_split(images, labels, training_set_size)

    params = {
        'batch_size': 64,
        'num_of_epochs': 20,
        'learning_rate': 0.1,
        'init_scale': 0.05,  # scale of the random weight initialization
        'keep_prob': 0.9,  # dropout keep probability
        'ema': 0.999  # exponential-moving-average decay for the weights
    }
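    # Network architecture: 784 flattened pixels -> one hidden layer of 500 units -> 10 classes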
    model = MultiLayerNN([28 * 28, 500, 10], **params)
    model.fit(training_images, training_labels, valid_images, valid_labels)

    eval_images = extract_images('t10k-images-idx3-ubyte.gz')
    eval_images = np.reshape(eval_images, (-1, 28 * 28)) / 255
    eval_labels = extract_labels('t10k-labels-idx1-ubyte.gz')

    # Predict with test-time behavior enabled (test=True presumably applies
    # the EMA-averaged weights and disables dropout)
    predictions = model.predict(eval_images, test=True)

    print("test accuracy (test=True): " +
          str(round(compute_accuracy(predictions, eval_labels), 2)))

    # Predict again with the default forward pass, for comparison
    predictions = model.predict(eval_images)

    print("test accuracy (default): " +
          str(round(compute_accuracy(predictions, eval_labels), 2)))

    plot_confusion_matrices(eval_labels, predictions, classes=range(10))
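
The split and accuracy helpers are not shown in the snippet; a minimal sketch of what train_validation_split and compute_accuracy might look like, assuming predictions come back as class indices:

def train_validation_split(images, labels, training_set_size):
    # First training_set_size examples for training, the remainder for validation
    return (images[:training_set_size], labels[:training_set_size],
            images[training_set_size:], labels[training_set_size:])

def compute_accuracy(predictions, labels):
    # Fraction of predicted class indices matching the true labels
    return np.mean(predictions == labels)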
Example #2
import numpy as np
import tensorflow as tf

# dense_to_one_hot, train_validation_split, MultiLayerNN, compute_accuracy and
# plot_confusion_matrices are assumed to be defined elsewhere in this module.

if __name__ == "__main__":
    (x_train, y_train), (x_test, y_test) = tf.keras.datasets.cifar10.load_data()
    print(x_train.shape, y_train.shape, x_test.shape, y_test.shape)
    x_train = np.reshape(x_train, (-1, 32 * 32 * 3)) / 255
    x_test = np.reshape(x_test, (-1, 32 * 32 * 3)) / 255
    y_train = dense_to_one_hot(y_train)  # one-hot targets for training
    y_test = np.reshape(y_test, 10000)  # flatten (10000, 1) -> (10000,) integer labels

    x_train, y_train, x_valid, y_valid = train_validation_split(
        x_train, y_train, training_set_size=45_000)

    params = {
        'batch_size': 32,
        'num_of_epochs': 10,
        'learning_rate': 0.1,
        'init_scale': 0.05,
        'keep_prob': 0.9,
        'ema': 0.999
    }
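    # Network architecture: 3072 flattened pixels -> two hidden layers of 100 units -> 10 classes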
    model = MultiLayerNN([32 * 32 * 3, 100, 100, 10], **params)
    model.fit(x_train, y_train, x_valid, y_valid)

    predictions = model.predict(x_test, test=True)

    print("test accuracy: " +
          str(round(compute_accuracy(predictions, y_test), 2)))

    plot_confusion_matrices(y_test, predictions, classes=range(10))
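
dense_to_one_hot is likewise not shown; a minimal sketch, assuming integer labels of shape (N,) or (N, 1):

def dense_to_one_hot(labels, num_classes=10):
    # Map integer class labels to one-hot rows
    labels = np.asarray(labels).reshape(-1)
    return np.eye(num_classes)[labels]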
Example #3
import os
import multiprocessing as mp

import numpy as np
import scipy.stats as st
from matplotlib.backends.backend_pdf import PdfPages
from sklearn.model_selection import StratifiedKFold

# nShuffles, nProcesses, decode_labels, match_population_spikecount_distributions
# and the usrplt plotting module are assumed to be defined at module level.


def run_decoding_analysis(spike_counts,
                          trsum,
                          trial_indices,
                          trial_indices_perstim,
                          trial_indices_nomatch=None,
                          match_distributions=True,
                          PlotDir=None,
                          fsuffix=None,
                          classifier='LDA'):
    parallel = False
    do_shuffle = True

    #Save plots
    if PlotDir is None:
        pdfdoc = None
        plot = False
    else:
        plot = True
        if fsuffix is None:
            fname = 'decoding_suppl-plots.pdf'
        else:
            fname = 'decoding_suppl-plots_{}.pdf'.format(fsuffix)
        pdfdoc = PdfPages(os.path.join(PlotDir, fname))

    #Match distributions
    if match_distributions:
        spike_counts_orig = spike_counts.copy()
        spike_counts, nSpikes_removed, _ = match_population_spikecount_distributions(
            spike_counts, trial_indices, trial_indices, pdfdoc)

    else:
        nSpikes_removed = 0

    #For decoding results: the leading axis of size 2 indexes the two
    #behavioral conditions ('rest', 'run') looped over below
    nClasses = 4
    nKfold = 5
    nTrials, nBins, nNeurons = spike_counts.shape
    confusion_mat = np.zeros((2, nKfold, nClasses, nClasses))
    confusion_shf = np.zeros((2, nKfold, nClasses, nClasses))
    confusion_z = np.zeros((2, nKfold, nClasses, nClasses))
    CI95_shf = np.zeros((2, nKfold, 2, nClasses, nClasses))
    decoding_weights = np.zeros((2, nKfold, nClasses, nNeurons))
    decoding_weights_z = np.zeros((2, nKfold, nClasses, nNeurons))
    decoding_weights_m_shf = np.zeros((2, nKfold, nClasses, nNeurons))
    decoding_weights_s_shf = np.zeros((2, nKfold, nClasses, nNeurons))

    uniq_orientations = np.unique(trsum['orientation']).tolist()
    ##===== Loop over behavioral conditions =====##
    for iR, runstr in enumerate(['rest', 'run']):
        #Get trial indices for condition
        trial_indices_cond = trial_indices[iR]

        #Sum spikes in specific window
        X = np.sum(spike_counts[trial_indices_cond], axis=1)

        #Get class labels
        Y = np.array(
            trsum.iloc[trial_indices_cond]['orientation'].values).astype(int)
        #NOTE: this must equal the 4 assumed when the result arrays were allocated
        nClasses = len(np.unique(Y))

        #Create cross-validation object
        k_fold = StratifiedKFold(n_splits=nKfold)

        #Run the processes in parallel
        if parallel:
            pool = mp.Pool(processes=min(nProcesses, nKfold))
            processes = []
        results = []

        #Loop over kfolds
        for iK, (train_index,
                 test_index) in enumerate(k_fold.split(trial_indices_cond, Y)):
            if parallel:
                processes.append(
                    pool.apply_async(decode_labels,
                                     args=(X, Y, train_index, test_index,
                                           uniq_orientations, None, do_shuffle,
                                           classifier)))
            else:
                tmp = decode_labels(X, Y, train_index, test_index,
                                    uniq_orientations, None, do_shuffle,
                                    classifier)
                results.append(tmp)

        #Extract results from parallel kfold processing
        if parallel:
            results = [p.get() for p in processes]
            pool.close()

        #Calculate decoding accuracy per kfold
        for iK, rTuple in enumerate(results):
            kfold_hits = rTuple[0]  #shape (nClasses, nClasses)
            kfold_shf = rTuple[1]  #shape (nShuffles, nClasses, nClasses)
            decoding_weights[iR, iK] = rTuple[2]  #shape (nClasses, nNeurons)
            decoding_weights_z[iR, iK] = rTuple[3]  #shape (nClasses, nNeurons)
            decoding_weights_m_shf[iR, iK] = rTuple[4]  #shape (nClasses, nNeurons)
            decoding_weights_s_shf[iR, iK] = rTuple[5]  #shape (nClasses, nNeurons)

            #Normalize confusion matrix so each row (true class) sums to 1
            confusion_mat[iR, iK] = kfold_hits / np.sum(kfold_hits,
                                                        axis=1).reshape(-1, 1)

            if do_shuffle:
                #Loop through shuffles and normalize
                c_shf = np.zeros((nShuffles, nClasses, nClasses))
                for iS in range(nShuffles):
                    c_shf[iS] = kfold_shf[iS] / np.sum(kfold_shf[iS],
                                                       axis=1).reshape(-1, 1)

                #Calculate z-score for this kfold
                m_shf, s_shf = np.mean(c_shf, axis=0), np.std(c_shf, axis=0)
                confusion_shf[iR, iK] = m_shf
                confusion_z[iR, iK] = (confusion_mat[iR, iK] - m_shf) / s_shf

                #Confidence interval for this kfold (note: 2.576 is the
                #two-sided z critical value for 99%, not 95%)
                w = 2.576 * s_shf / np.sqrt(nShuffles)
                CI95_shf[iR, iK, 0] = m_shf - w
                CI95_shf[iR, iK, 1] = m_shf + w

            if plot:
                #Get significance of decoding
                pvalues_kfold = st.norm.sf(confusion_z[iR, iK])

                #Plot shuffle distributions
                title = 'Shuffle Distributions for kfold {}, {} behavioral condition'.format(
                    iK, runstr)
                usrplt.plot_decoding_shuffle(confusion_mat[iR, iK], c_shf,
                                             pvalues_kfold, title, pdfdoc)

    if plot:
        for iR, runstr in enumerate(['rest', 'run']):
            #Calculate mean decoding performance over kfolds
            mKfold = np.mean(confusion_mat[iR], axis=0)
            mKfoldz = np.mean(confusion_z[iR], axis=0)

            #Get significance of decoding
            mPvalues = st.norm.sf(mKfoldz)

            #Plot mean confusion matrices across kfolds
            title = 'Decoding Performance, {} behavioral condition'.format(
                runstr)
            usrplt.plot_confusion_matrices(mKfold, mKfoldz, mPvalues,
                                           uniq_orientations, title, pdfdoc)
        pdfdoc.close()

    return (confusion_mat, confusion_shf, confusion_z, CI95_shf,
            nSpikes_removed, decoding_weights, decoding_weights_z,
            decoding_weights_m_shf, decoding_weights_s_shf)
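
A hypothetical call, assuming spike_counts of shape (nTrials, nBins, nNeurons), a trial-summary DataFrame trsum with an 'orientation' column, and trial_indices holding one index array per behavioral condition:

# Hypothetical invocation; the variables below are placeholders.
results = run_decoding_analysis(spike_counts,           # (nTrials, nBins, nNeurons)
                                trsum,                  # DataFrame with 'orientation'
                                trial_indices,          # [rest_indices, run_indices]
                                trial_indices_perstim,
                                PlotDir='./plots',
                                fsuffix='session01',
                                classifier='LDA')
(confusion_mat, confusion_shf, confusion_z, CI95_shf, nSpikes_removed,
 decoding_weights, decoding_weights_z,
 decoding_weights_m_shf, decoding_weights_s_shf) = results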