Example #1
def mask(img, xdim, ydim):

    utils.plot_histogram(img)

    [B, G, R] = cv.split(img)
    blue = B.astype(float)
    green = G.astype(float)
    red = R.astype(float)

    meanR = np.mean(red)
    stdR = np.std(red)
    print(meanR + 1.6 * stdR)
    meanB = np.mean(blue)
    stdB = np.std(blue)
    print(meanB + 1.1 * stdB)

    mode_pixel = utils.get_mode(img, xdim, ydim)

    # separate into roads and houses
    for i in range(xdim):
        for j in range(ydim):
            # road: red value is at least 2 std above the mean
            if red[i, j] > meanR + 1.6 * stdR:  # red[i,j] > 180
                img[i, j] = mode_pixel
            # houses: blue value is at least 1 std above the mean
            if blue[i, j] > meanB + 1.1 * stdB:  # 182: #and blue[i,j] <= 238:
                img[i, j] = (0, 0, 0)

    utils.show_image(img, 'mask')

    return img
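utils.plot_histogram itself is not shown in these examples. For the single-argument form used above (a BGR image split with cv.split), a minimal sketch might look like the following; the implementation and the per-channel behaviour are assumptions, not the original utils module.

import cv2 as cv
import matplotlib.pyplot as plt


def plot_histogram(img):
    # Assumed helper: one 256-bin intensity histogram per BGR channel.
    for i, color in enumerate(('b', 'g', 'r')):
        hist = cv.calcHist([img], [i], None, [256], [0, 256])
        plt.plot(hist, color=color)
    plt.xlim([0, 256])
    plt.xlabel('Intensity')
    plt.ylabel('Pixel count')
    plt.show()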
Example #2
def mask(img, xdim, ydim):

  utils.plot_histogram(img)

  [B,G,R] = cv.split(img)
  blue = B.astype(float)
  green = G.astype(float)
  red = R.astype(float)

  meanR = np.mean(red)
  stdR = np.std(red)
  print(meanR + 1.6 * stdR)
  meanB = np.mean(blue)
  stdB = np.std(blue)
  print(meanB + 1.1 * stdB)

  mode_pixel = utils.get_mode(img, xdim, ydim)

  # separate into roads and houses
  for i in range(xdim):
    for j in range(ydim):
      # road: red value is at least 2 std above the mean
      if red[i,j] > meanR + 1.6 * stdR: # red[i,j] > 180
        img[i,j] = mode_pixel
      # houses: blue value is at least 1 std above the mean
      if blue[i,j] > meanB + 1.1 * stdB: # 182: #and blue[i,j] <= 238:
        img[i,j] = (0,0,0)

  utils.show_image(img, 'mask')

  return img
Example #3
def evaluate_training_generator(generator, eng, params, num_imgs=100):

    # generate images
    z = sample_z(num_imgs, params)
    imgs = generator(z, params)

    # efficiencies of generated images
    effs = compute_effs(imgs, eng, params)
    effs_mean = torch.mean(effs.view(-1))

    # save the highest
    save_the_max(imgs, effs, params)

    # binarization of generated images
    binarization = torch.mean(torch.abs(imgs.view(-1))).cpu().detach().numpy()

    # diversity of generated images
    diversity = torch.mean(torch.std(imgs, dim=0)).cpu().detach().numpy()
    print("diversity:", diversity)
    #diversity penalty
    var = torch.mean(torch.var(imgs, dim=0)).cpu().detach().numpy()
    print("var:", var)

    # plot histogram
    fig_path = params.output_dir + '/figures/histogram/Iter{}.png'.format(
        params.iter)
    utils.plot_histogram(effs.data.cpu().numpy().reshape(-1), params.iter,
                         fig_path, params)

    return effs_mean, binarization, diversity
Example #4
def evaluate(generator, eng, numImgs, params):
    generator.eval()

    filename = 'ccGAN_imgs_Si_w' + \
        str(params.w) + '_' + str(params.a) + 'deg.mat'
    img, strucs = sample_images(generator, numImgs, params)
    file_path = os.path.join(params.output_dir, 'outputs', filename)
    logging.info('Generation is done. \n')

    Efficiency = torch.zeros(numImgs)

    wavelength = matlab.double([params.w] * numImgs)
    desired_angle = matlab.double([params.a] * numImgs)
    abseffs = eng.Eval_Eff_1D_parallel(img, wavelength, desired_angle)
    Efficiency = torch.Tensor([abseffs]).data.cpu().numpy().reshape(-1)
    max_eff_index = np.argmax(Efficiency)
    max_eff = Efficiency[max_eff_index]
    best_struc = strucs[max_eff_index, :, :].reshape(-1)

    fig_path = params.output_dir + '/figures/Efficiency.png'
    utils.plot_histogram(Efficiency, params.numIter, fig_path)

    print('{} {} {} {} {} {} {:.2f}'.format('The best efficiency for',
                                            'wavelength =', params.w,
                                            'and angle =', params.a, 'is',
                                            max_eff))
    io.savemat(file_path,
               mdict={
                   'strucs': strucs,
                   'effs': Efficiency,
                   'best_struc': best_struc,
                   'max_eff_index': max_eff_index,
                   'max_eff': max_eff
               })
Example #5
def evaluate(generator, eng, numImgs, params):
    generator.eval()

    # generate images
    z = sample_z(numImgs, params)
    images = generator(z, params)
    logging.info('Generation is done. \n')

    # evaluate efficiencies
    images = torch.sign(images)
    effs = compute_effs(images, eng, params)

    # save images
    filename = 'imgs_w' + str(params.wavelength) + '_a' + str(
        params.angle) + 'deg.mat'
    file_path = os.path.join(params.output_dir, 'outputs', filename)
    io.savemat(file_path,
               mdict={
                   'imgs': images.cpu().detach().numpy(),
                   'effs': effs.cpu().detach().numpy()
               })

    # plot histogram
    fig_path = params.output_dir + '/figures/Efficiency.png'
    utils.plot_histogram(effs.data.cpu().numpy().reshape(-1), params.numIter,
                         fig_path)
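Examples #4, #5, #7, and #8 pass an efficiency array, an iteration number, and a figure path to plot_histogram (Example #3 additionally passes params). A minimal sketch of that saving variant, under the assumption that it simply writes a labelled histogram to disk:

import numpy as np
import matplotlib.pyplot as plt


def plot_histogram(effs, iteration, fig_path):
    # Assumed helper: histogram of efficiencies at a given iteration, saved to fig_path.
    plt.figure()
    plt.hist(np.asarray(effs).reshape(-1), bins=20)
    plt.xlabel('Efficiency')
    plt.ylabel('Count')
    plt.title('Iteration {}'.format(iteration))
    plt.savefig(fig_path)
    plt.close()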
Example #6
def measure_models(m1, m2, i):
    m1_conv = m1['conv1.0.weight'].view(-1)
    m2_conv = m2['conv1.0.weight'].view(-1)
    m1_linear = m1['linear.weight']
    m2_linear = m2['linear.weight']

    print(m1_conv.shape, m1_linear.mean(0).shape)

    l1_conv = (m1_conv - m2_conv).abs().sum()
    l1_linear = (m1_linear - m2_linear).abs().sum()
    print("\nL1 Dist: {} - {}".format(l1_conv, l1_linear))

    l2_conv = np.linalg.norm(m1_conv - m2_conv)
    l2_linear = np.linalg.norm(m1_linear - m2_linear)
    print("L2 Dist: {} - {}".format(l2_conv, l2_linear))

    linf_conv = np.max((m1_conv - m2_conv).abs().cpu().numpy())
    linf_linear = np.max((m1_linear - m2_linear).abs().cpu().numpy())
    print("Linf Dist: {} - {}".format(linf_conv, linf_linear))

    cov_m1_m2_conv = np.cov(m1_conv, m2_conv)[0, 1]
    cov_m1_m2_linear = np.cov(m1_linear, m2_linear)[0, 1]
    print("Cov m1-m2: {} - {}".format(cov_m1_m2_conv, cov_m1_m2_linear))

    cov_m1_conv_linear = np.cov(m1_conv, m1_linear.mean(0))[0, 1]
    cov_m2_conv_linear = np.cov(m2_conv, m2_linear.mean(0))[0, 1]
    print("Cov m1-conv-linear: {} - {}\n\n".format(cov_m1_conv_linear,
                                                   cov_m2_conv_linear))

    return
    utils.plot_histogram(
        [m1_conv.view(-1).cpu().numpy(),
         m2_conv.view(-1).cpu().numpy()],
        save=False)
    utils.plot_histogram(
        [m1_linear.view(-1).cpu().numpy(),
         m2_linear.view(-1).cpu().numpy()],
        save=False)

    for l, name in [(m1_conv, 'conv1.0'), (m1_linear, 'linear')]:
        params = l.cpu().numpy()
        save_dir = 'params/{}/{}/{}'.format(args.data, args.net, name)
        if not os.path.exists(save_dir):
            print("making ", save_dir)
            os.makedirs(save_dir)
        path = '{}/{}_{}.npy'.format(save_dir, name, i)
        print(i)
        print('saving param size: ', params.shape, 'to ', path)
        np.save(path, params)
Example #7
def PCA_analysis(generator, pca, eng, params, numImgs=100):
    generator.eval()
    imgs = sample_images(generator, numImgs, params)
    generator.train()

    Efficiency = torch.zeros(numImgs)

    img = torch.squeeze(imgs[:, 0, :]).data.cpu().numpy()
    img = matlab.double(img.tolist())
    wavelength = matlab.double([params.w] * numImgs)
    desired_angle = matlab.double([params.a] * numImgs)

    abseffs = eng.Eval_Eff_1D_parallel(img, wavelength, desired_angle)
    Efficiency = torch.Tensor([abseffs]).data.cpu().numpy().reshape(-1)

    # img = img[np.where(Efficiency.reshape(-1) > 0), :]
    # Efficiency = Efficiency[Efficiency > 0]

    img_2 = pca.transform(img)

    fig_path = params.output_dir + \
        '/figures/scatter/Iter{}.png'.format(params.iter)
    utils.plot_scatter(img_2, Efficiency, params.iter, fig_path)

    fig_path = params.output_dir + \
        '/figures/histogram/Iter{}.png'.format(params.iter)
    utils.plot_histogram(Efficiency, params.iter, fig_path)

    imgs = imgs[:8, :, :].unsqueeze(2).repeat(1, 1, 64, 1)
    fig_path = params.output_dir + \
        '/figures/deviceSamples/Iter{}.png'.format(params.iter)
    save_image(imgs, fig_path, 2)
    '''
    grads = eng.GradientFromSolver_1D_parallel(img, wavelength, desired_angle)
    grad_2 = pca.transform(grads)
    if params.iter % 2 == 0:
        utils.plot_envolution(params.img_2_prev, params.eff_prev, params.grad_2_prev,
                              img_2, Efficiency, params.iter, params.output_dir)
    else:
        utils.plot_arrow(img_2, Efficiency, grad_2, params.iter, params.output_dir)
    params.img_2_prev = img_2
    params.eff_prev = Efficiency
    params.grad_2_prev = grad_2
    '''
    return img_2, Efficiency
Example #8
def evaluate_training_generator(generator, func, params, num_imgs=10):
    # generate images
    z = sample_z(num_imgs, params)
    imgs = generator(z, params)

    # efficiencies of generated images
    effs = compute_effs(imgs, func, params)
    effs_mean = torch.mean(effs.view(-1))

    # binarization of generated images
    binarization = torch.mean(torch.abs(imgs.view(-1))).cpu().detach().numpy()

    # diversity of generated images
    diversity = torch.mean(torch.std(imgs, dim=0)).cpu().detach().numpy()

    # plot histogram
    fig_path = params.output_dir + '/figures/histogram/Iter{}.png'.format(
        params.iter)
    utils.plot_histogram(effs.data.cpu().numpy().reshape(-1), params.iter,
                         fig_path)

    return effs_mean, binarization, diversity
Example #9
import cv2
import numpy as np 
from utils import halftoning, plot_histogram
from argparser import Parser

args = Parser()
img_path = args.get_arg('image')
output_filename = args.get_arg('output_path')
error_dist = args.get_arg('error_dist')
sweep_mode = args.get_arg('sweep_mode')
display_mode = args.get_arg('display_mode')

img_path = 'images/peppers.png' if img_path is None else img_path

img = cv2.imread(img_path, cv2.IMREAD_COLOR)
res_img = halftoning(np.array(img), edist_id=error_dist, sweep_mode=sweep_mode)

if output_filename is not None:
    cv2.imwrite(output_filename, res_img)

if display_mode == 'hist':
    plot_histogram(res_img)
elif display_mode is None or display_mode == 'images':
    cv2.imshow('source image', img)
    cv2.imshow('resulting image', res_img)

cv2.waitKey(0)
cv2.destroyAllWindows()
Example #10
def getLeafInfo(collate_leaves, feature, leafvalues, extension=""):
    # Add the examples for each leaf
    allAgreement = {}
    relationcount = defaultdict(lambda: 0)
    total = 0
    for leaf_num, _ in enumerate(collate_leaves):
        leaf_node = collate_leaves[leaf_num]

        ag_examples, dis_examples, relation_dict, head_pos_dict, child_pos_dict, example, sorted_examplecount = utils.getAggreeingExamples(
            leaf_node, feature, data, leafvalues[leaf_num],
            data_loader.train_random_samples)

        with open(
                f"{folder_name}/{lang}/{feature}/{feature}-{leaf_num}-{extension}.html",
                'w') as outp2:
            HEADER = ORIG_HEADER.replace("main.css", "../../main.css")
            outp2.write(HEADER + '\n')
            outp2.write(
                f'<ul class="nav"><li class="nav"><a class="active" href=\"../../index.html\">Home</a>'
                f'</li><li class="nav"><a href=\"../../introduction.html\">Usage</a></li>'
                f'<li class="nav"><a href=\"../../about.html\">About Us</a></li></ul>'
            )
            outp2.write(
                f"<br><li><a href=\"{feature}.html\">Back to {feature} {language_fullname} page</a></li>\n"
            )
            if len(relation_dict) > 0:
                utils.plot_histogram(
                    relation_dict,
                    color='peru',
                    type='Relation',
                    file=
                    f"./{folder_name}/{lang}/{feature}/{leaf_num}-{extension}")
            if len(head_pos_dict) > 0:
                utils.plot_histogram(
                    head_pos_dict,
                    color='seagreen',
                    type='Head-POS',
                    file=
                    f"./{folder_name}/{lang}/{feature}/{leaf_num}-{extension}")
            if len(child_pos_dict) > 0:
                utils.plot_histogram(
                    child_pos_dict,
                    color='olivedrab',
                    type='Child-POS',
                    file=
                    f"./{folder_name}/{lang}/{feature}/{leaf_num}-{extension}")
            outp2.write(f"<h2>Distribution of features within this leaf </h2>")

            outp2.write(
                f"<p style = \"float: left; font-size: 15pt; text-align: center; width: 33%; \"><img src=\"{leaf_num}-Relation.png\" alt=\"Relation\" style=\"width:100%\"></p>"
            )
            outp2.write(
                f"<p style = \"float: left; font-size: 15pt; text-align: center; width: 33%; \"><img src=\"{leaf_num}-Head-POS.png\" alt=\"head-pos\" style=\"width:100%\"></p>"
            )
            outp2.write(
                f"<p style = \"float: left; font-size: 15pt; text-align: center; width: 33%;\"><img src=\"{leaf_num}-Child-POS.png\" alt=\"child-pos\" style=\"width:100%\"></p><br>"
            )
            if not ag_examples:
                outp2.write("\tNo agree examples found.<br>")
            else:
                outp2.write(
                    "<h2>Agreement Rules sorted by frequency.</h2> <ul>")

                required_relation, required_head, required_child, _, _ = utils.parseLeafInformation(
                    leaf_node[1])
                for (key, val) in sorted_examplecount:
                    rule_template = ""
                    (relation, head_pos, child_pos) = key
                    ex = example[key]

                    if leaf_node[0] == "agreement":
                        if relation not in allAgreement:
                            allAgreement[relation] = {}
                        allAgreement[relation][(head_pos, child_pos)] = val

                        relationcount[relation] += val
                        total += val
                    if required_relation is not None:
                        if relation not in relation_map:
                            if relation.split("@")[0] in relation_map:
                                full_relation_name = relation_map[
                                    relation.split("@")[0]][0]
                                url = relation_map[relation.split("@")[0]][1]
                            else:
                                full_relation_name = relation
                                url = f'https://universaldependencies.org/'
                        else:
                            full_relation_name = relation_map[relation][0]
                            url = relation_map[relation][1]

                        rule_template = f" When the dependent token is the "
                        rule_template += f"<i>{full_relation_name}</i>(<a href=\"{url}\">{relation})</a> of the head token, "
                    if required_head is not None:
                        if len(rule_template) == 0:
                            rule_template = f'<p> When the head token is <i>{head_pos}</i>  '
                        else:
                            rule_template += f" and the head token is <i>{head_pos}</i> "
                    if required_child is not None:
                        if len(rule_template) == 0:
                            rule_template = f'<p> When the dependent token is <i>{child_pos}</i>  '
                        else:
                            rule_template += f" and the dependent token is <i>{child_pos}</i>."
                    outp2.write(f"<li>{rule_template}</li>")
                    utils.example_web_print(ex, outp2, data)
                    outp2.write(f"<br>")
                outp2.write("</ul>")

            if not dis_examples:
                outp2.write("\tNo disagree examples found.<br>")
            else:
                outp2.write("\t<br><h2>Disagree Examples:</h2>")
                for ex in dis_examples:
                    utils.example_web_print(ex, outp2, data)

            outp2.write(FOOTER)

    # Get summary of the agreement rules
    # Sort the agreements by relation type
    if len(allAgreement) == 0:
        summary = [f'<p>There is no agreement for {feature}.</p>']
        return summary
    sorted_relation = sorted(relationcount.items(),
                             key=lambda kv: kv[1],
                             reverse=True)
    summary = []
    summary.append(f'<ol>')
    rulenum = 1
    headchilddict = defaultdict(set)
    for (relation, val) in sorted_relation:
        sorted_headchild = sorted(allAgreement[relation].items(),
                                  key=lambda kv: kv[1],
                                  reverse=True)
        #Group-by head
        GroupbyHead, GroupbyHeadInfo, GroupByChild, GroupByChildInfo = defaultdict(
            lambda: 0), defaultdict(set), defaultdict(lambda: 0), defaultdict(
                set)
        child, head = False, False
        if relation is None:
            full_relation_name = 'anything'
        else:
            if relation not in relation_map:
                if relation.split("@")[0] in relation_map:
                    full_relation_name = relation_map[relation.split("@")
                                                      [0]][0]
                else:
                    full_relation_name = relation
            else:
                full_relation_name = relation_map[relation][0]
        for (headchild, value) in sorted_headchild:
            if value * 1.0 / val < 0.5:
                continue
            (head, child) = headchild
            if child is not None and head is not None:
                GroupByChild[child] += value
                GroupByChildInfo[child].add(head)
                GroupbyHead[head] += value
                GroupbyHeadInfo[head].add(child)
                child, head = True, True
            else:
                if head is None:
                    head = False
                else:
                    GroupbyHead[head] = 0
                    head = True
                if child is None:
                    child = False
                else:
                    GroupByChild[child] = 0
                    child = True

        if not head and child:
            all_childpos = ",".join(list(GroupByChild.keys()))
            key = f'<i>{all_childpos}</i> tokens agree with their head'
            value = f'<i>{full_relation_name}({relation})</i>'

            headchilddict[key].add(value)
        elif not child and head:
            all_headpos = ",".join(list(GroupbyHead.keys()))
            key = f'<i>{all_headpos}</i> tokens agree with their dependent tokens'
            value = f'<i>{full_relation_name}({relation})</i>'
            headchilddict[key].add(value)

        elif not child and not head:
            key = f'All tokens agree with their head tokens'
            value = f'<i>{full_relation_name} ({relation})</i>'
            headchilddict[key].add(value)

        elif child and head:
            # Sort group-by-head and group-by-child and compare the highest values; whichever is higher is chosen as the grouping condition
            sort_grpchild = sorted(GroupByChild.items(),
                                   key=lambda kv: kv[1],
                                   reverse=True)
            sort_grphead = sorted(GroupbyHead.items(),
                                  key=lambda kv: kv[1],
                                  reverse=True)

            if sort_grpchild[0][1] > sort_grphead[0][1]:  #Grp by child
                for (child, _) in sort_grpchild:
                    headpos = ", ".join(list(GroupByChildInfo[child]))
                    headchilddict[
                        f'<i>{child}</i> tokens agree when head token belongs to [<i>{headpos}</i>]'].add(
                            f'<i>{full_relation_name}({relation})</i>')
            else:
                for (head, _) in sort_grphead:
                    childpos = ", ".join(list(GroupbyHeadInfo[head]))
                    headchilddict[
                        f'<i>{head}</i> tokens agree when the dependent token belongs to [<i>{childpos}</i>]'].add(
                            f'<i>{full_relation_name}({relation})</i>')

    for rule, relations in headchilddict.items():
        summary.append(
            f'<li> {rule} for the dependency relations: {", ".join(list(relations))} </li><br>'
        )
        rulenum += 1
    summary.append(f'</ol>')
    summary = "".join(summary)
    return summary
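In Example #10 the same name is called with a counts dictionary plus color, type, and file keyword arguments, and the generated HTML embeds images named like '{leaf_num}-Relation.png'. A rough sketch of such a variant, assuming it saves a bar chart of the counts with an output name derived from the file and type arguments (the exact naming convention is a guess):

import matplotlib.pyplot as plt


def plot_histogram(counts, color='peru', type='Relation', file='out'):
    # Assumed helper: bar chart of category counts (relations or POS tags).
    # 'type' and 'file' mirror the keyword names used at the call sites above;
    # the output path is an assumed convention.
    labels = list(counts.keys())
    plt.figure(figsize=(8, 4))
    plt.bar(labels, [counts[k] for k in labels], color=color)
    plt.xticks(rotation=45, ha='right')
    plt.ylabel('Count')
    plt.title(type)
    plt.tight_layout()
    plt.savefig('{}-{}.png'.format(file, type))
    plt.close()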
Example #11
def main():

    #**************************************************
    #**************** SIDEBAR SECTION *****************
    #**************************************************

    search_word = st.sidebar.text_input('Insira a palavra a ser pesquisada:')

    search_language = st.sidebar.selectbox('Selecione o idioma:',
                                           ['Inglês', 'Português'])

    chk_retweet = st.sidebar.checkbox('Incluir Retweets no resultado')

    #date_since = st.sidebar.date_input(
    #    "Escolha a data inicial da captura de tweets:",
    #    datetime.date(2019, 7, 6)
    #)

    # Add a file_uploader to the sidebar:
    add_tweets_slider = st.sidebar.slider(
        'Quantidade de tweets a serem retornados: ', 1, 500)

    # **************************************************
    # **************** MAIN SECTION *****************
    # **************************************************

    #Page's Title and Subtitle
    st.title("Análise de Tweets via Twitter API")
    st.subheader("Por: Ysabelle Sousa")

    api = utils.connect_twitter_api(config.CONSUMER_KEY, config.CONSUMER_SECRET, config.ACCESS_TOKEN,\
                                    config.ACCESS_TOKEN_SECRET)

    if api and search_word != '':

        st.success("Conexão com a API realizada com sucesso!")

        st.write('**Tweets sobre:** ', search_word)

        tweets = utils.searching_tweets(api, search_word,
                                        datetime.date(2020, 1,
                                                      1), search_language,
                                        add_tweets_slider, chk_retweet)

        df_tweets = utils.gathering_tweet_information(tweets)

        st.write('**Média de Seguidores dos Usuários:** ',
                 round(df_tweets['followers_count'].mean(), 2))
        st.write('**Média de Favoritos:** ',
                 round(df_tweets['tweet_favorite_count'].mean(), 2))
        st.write('**Média de Retweets:** ',
                 round(df_tweets['tweet_retweet_count'].mean(), 2))

        # TABLE
        st.subheader('Últimos Tweets:')
        st.table(df_tweets[['tweet', 'tweet_date'
                            ]].sort_values('tweet_date',
                                           ascending=False).head(7))

        #BARCHART
        st.subheader('Localizações dos Usuários:')
        st.bar_chart(
            df_tweets[df_tweets['location'] != '']['location'].sort_values(
                ascending=False).unique()[:40])

        #WORDCLOUDS
        st.subheader('Principais palavras:')
        st.pyplot(utils.generating_wordcloud(df_tweets))

        st.subheader('Principais hashtags:')
        st.pyplot(utils.generating_wordcloud_hashtag(df_tweets))

        # HISTOGRAMS
        st.subheader('Frequência de Seguidores:')
        utils.plot_histogram(df_tweets['followers_count'])

        st.subheader('Frequência de Favoritos:')
        utils.plot_histogram(df_tweets['tweet_favorite_count'])

        st.subheader('Frequência de Retweets:')
        utils.plot_histogram(df_tweets['tweet_retweet_count'])
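Example #11 calls plot_histogram on a pandas Series from inside a Streamlit app without wrapping it in st.pyplot, so the helper presumably renders the figure itself. A minimal sketch under that assumption; the signature and the Streamlit call are guesses, not the original utils module:

import matplotlib.pyplot as plt
import streamlit as st


def plot_histogram(series):
    # Assumed helper: histogram of a numeric pandas Series rendered in Streamlit.
    fig, ax = plt.subplots()
    ax.hist(series.dropna(), bins=30)
    ax.set_xlabel(series.name)
    ax.set_ylabel('Frequency')
    st.pyplot(fig)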
Example #12
# 
# You should have received a copy of the GNU General Public License
# along with this software; see the file COPYING.  If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.

import sys
sys.path.insert(0, './extras/')
sys.path.insert(0, './')

import apt
import utils
import numpy as np
import scipy.signal

from PIL import Image

matrix = apt.decode('wav/am_demod/sample.wav')


utils.plot_histogram(matrix,'Imagen completa')
utils.plot_image(matrix,'Imagen completa')

# Flip the image 180 degrees because the satellite was passing from South to North
frameA = utils.flip(utils.get_frame(matrix,"A"))
frameB = utils.flip(utils.get_frame(matrix,"B"))

#
utils.plot_histogram(frameB,'Histograma Banda Infrarroja', save = True)
utils.plot_histogram(frameA,'Histograma Espectro visible', save = True)
Example #13
import numpy as np
import matplotlib.pyplot as plt
import utils

# load the data
all_data_uniform = np.load('uniform_r12t9.npy')
all_data_clustered = np.load('clustered_r12t9.npy')

# load corr coefs
rho_uniform = np.load('rho_uniform_1000.npy')
rho_clustered = np.load('rho_clustered_1000.npy')

# get vector representation of uniform corr coef
rho_uni_vec = utils.extract_all_corr_coef(rho_uniform)
rho_clus_vec = utils.extract_all_corr_coef(rho_clustered)

# get matrix of corr coefs per cluster
rho_per_cluster = utils.extract_cluster_corr_coef(rho_clustered)
rho_uniform_random = utils.extract_random_corr_coef(rho_uniform,
                                                    size=len(rho_per_cluster))

# plot histogram
plt.ion()
utils.plot_histogram(rho_uniform_random,
                     rho_per_cluster,
                     binwidth=0.01,
                     xlabel='Correlation same cluster')
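Example #13 passes two samples of correlation coefficients together with binwidth and xlabel keyword arguments. A minimal sketch of an overlaid two-sample histogram consistent with that call; the shared-bin construction and the labels are assumptions:

import numpy as np
import matplotlib.pyplot as plt


def plot_histogram(sample_a, sample_b, binwidth=0.01, xlabel=''):
    # Assumed helper: overlay two samples with a common bin width.
    lo = min(np.min(sample_a), np.min(sample_b))
    hi = max(np.max(sample_a), np.max(sample_b))
    bins = np.arange(lo, hi + binwidth, binwidth)
    plt.figure()
    plt.hist(sample_a, bins=bins, alpha=0.5, label='uniform (random subset)')
    plt.hist(sample_b, bins=bins, alpha=0.5, label='per cluster')
    plt.xlabel(xlabel)
    plt.ylabel('Count')
    plt.legend()
    plt.show()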
Example #14
    display = True
    if display == True:
        plt.figure()
        plt.title('Telemetry Vector')
        plt.plot(tlmtry.get_vector(telemetry), label='Telemetry Vector')
        plt.plot(tlmtry.get_vector(telemetry_norm),
                 label='Normalized Telemetry Vector')
        plt.legend()
        plt.show()
    '''
    Histogram
    '''

    display = False
    if display == True:
        utils.plot_histogram(matrix, "Raw Histogram")
        utils.plot_histogram(img_filtered, "Raw Filtered")

    matrix = img_filtered

    frame_A = utils.get_frame(matrix, "A")
    frame_B = utils.get_frame(matrix, "B")

    ##SPACE AND TIME FRAME SYNC

    space_time_sync_frame = tlmtry.get_space_time_sync_frame(matrix, "B")
    space_time_sync_frame = Image.fromarray(space_time_sync_frame)

    display = True
    if display == True:
        plt.figure()
Example #15
})
print("Predicting fake data using KNN classifier")
knn_preds = knn_clf.predict(output)
preds = prediction_fn.eval(feed_dict={
    x: np.reshape(fake_batch, [-1, 4096]),
    keep_prob: 1.0
})
softmax = softmax_fn.eval(feed_dict={
    x: np.reshape(fake_batch, [-1, 4096]),
    keep_prob: 1.0
})

# test on different data
name = 'wgan_sid_0_5000'
print("Computing histogram of predicted probabilities")
plot_histogram(softmax.flatten(),
               filename='histogram_probs_{}.png'.format(name))

print("Computing histogram of highest probabilities")
plot_histogram(softmax[np.arange(len(softmax)),
                       np.argmax(softmax, axis=1)],
               filename='histogram_max_probs_{}.png'.format(name))

print("Computing and saving Confusion Matrix")
cm = confusion_matrix(y_true=preds,
                      y_pred=knn_preds,
                      labels=np.arange(0, n_classes))
plot_confusion_matrix(cm,
                      np.arange(0, n_classes),
                      normalize=True,
                      filename='conf_mat_cnn_knn_{}.png'.format(name))
np.save('conf_mat_cnn_knn_{}.npy'.format(name), cm)
Example #16
def measure_models(args, hypernet, weights):

    models = []
    m_conv1, m_conv2, m_linear = [], [], []
    arch = get_network(args)
    if args.hyper:
        with torch.no_grad():
            for i in range(100):
                model, weights = sample_fmodel(args, hypernet, arch)
                m_conv1.append(weights[0])
                m_conv2.append(weights[1])
                m_linear.append(weights[2])
            m_conv1 = torch.stack(m_conv1)
            m_conv2 = torch.stack(m_conv2)
            m_linear = torch.stack(m_linear)
            print(m_conv1.shape)
            print(m_conv2.shape)
            print(m_linear.shape)

            l2_c1, l2_c2, l2_lin = [], [], []
            for i in range(100):
                l2_c1.append(np.linalg.norm(m_conv1[i]))
                l2_c2.append(np.linalg.norm(m_conv2[i]))
                l2_lin.append(np.linalg.norm(m_linear[i]))
            print(l2_c1)
            print(l2_c2)
            print(l2_lin)
    else:
        with torch.no_grad():
            for i in range(len(weights)):
                m_conv1.append(weights[i]['conv1.0.weight'])
                m_conv2.append(weights[i]['conv2.0.weight'])
                m_linear.append(weights[i]['linear.weight'])
            m_conv1 = torch.stack(m_conv1)
            m_conv2 = torch.stack(m_conv2)
            m_linear = torch.stack(m_linear)
            print(m_conv1.shape)
            print(m_conv2.shape)
            print(m_linear.shape)

            l2_c1, l2_c2, l2_lin = [], [], []
            for i in range(len(weights)):
                l2_c1.append(np.linalg.norm(m_conv1[i]))
                l2_c2.append(np.linalg.norm(m_conv2[i]))
                l2_lin.append(np.linalg.norm(m_linear[i]))
            print(l2_c1)
            print(l2_c2)
            print(l2_lin)
    return
    print(m1_conv.shape, m1_linear.mean(0).shape)

    l1_conv = (m1_conv - m2_conv).abs().sum()
    l1_linear = (m1_linear - m2_linear).abs().sum()
    print("\nL1 Dist: {} - {}".format(l1_conv, l1_linear))

    l2_conv = np.linalg.norm(m1_conv - m2_conv)
    l2_linear = np.linalg.norm(m1_linear - m2_linear)
    print("L2 Dist: {} - {}".format(l2_conv, l2_linear))

    linf_conv = np.max((m1_conv - m2_conv).abs().cpu().numpy())
    linf_linear = np.max((m1_linear - m2_linear).abs().cpu().numpy())
    print("Linf Dist: {} - {}".format(linf_conv, linf_linear))

    cov_m1_m2_conv = np.cov(m1_conv, m2_conv)[0, 1]
    cov_m1_m2_linear = np.cov(m1_linear, m2_linear)[0, 1]
    print("Cov m1-m2: {} - {}".format(cov_m1_m2_conv, cov_m1_m2_linear))

    cov_m1_conv_linear = np.cov(m1_conv, m1_linear.mean(0))[0, 1]
    cov_m2_conv_linear = np.cov(m2_conv, m2_linear.mean(0))[0, 1]
    print("Cov m1-conv-linear: {} - {}\n\n".format(cov_m1_conv_linear,
                                                   cov_m2_conv_linear))

    return
    utils.plot_histogram(
        [m1_conv.view(-1).cpu().numpy(),
         m2_conv.view(-1).cpu().numpy()],
        save=False)
    utils.plot_histogram(
        [m1_linear.view(-1).cpu().numpy(),
         m2_linear.view(-1).cpu().numpy()],
        save=False)

    for l, name in [(m1_conv, 'conv1.0'), (m1_linear, 'linear')]:
        params = l.cpu().numpy()
        save_dir = 'params/mnist/{}/{}'.format(args.net, name)
        if not os.path.exists(save_dir):
            print("making ", save_dir)
            os.makedirs(save_dir)
        path = '{}/{}_{}.npy'.format(save_dir, name, i)
        print(i)
        print('saving param size: ', params.shape, 'to ', path)
        np.save(path, params)
Example #17
def measure_models(args, hypernet, weights):

    models = []
    from scipy.optimize import curve_fit
    import matplotlib.pyplot as plt
    m_conv1, m_conv2, m_linear = [], [], []
    arch = get_network(args)
    with torch.no_grad():
        for i in range(1000):
            model, w = sample_fmodel(args, hypernet, arch)
            m_conv1.append(w[0])
            m_conv2.append(w[1])
            m_linear.append(w[2])
        m_conv1 = torch.stack(m_conv1)
        m_conv2 = torch.stack(m_conv2)
        m_linear = torch.stack(m_linear)
        print(m_conv1.shape)
        print(m_conv2.shape)
        print(m_linear.shape)

        l2_c1, l2_c2, l2_lin = [], [], []
        for i in range(1000):
            l2_c1.append(np.linalg.norm(m_conv1[i]))
            l2_c2.append(np.linalg.norm(m_conv2[i]))
            l2_lin.append(np.linalg.norm(m_linear[i]))
        print(np.array(l2_c1).mean(), np.array(l2_c1).std())
        print(np.array(l2_c2).mean(), np.array(l2_c2).std())
        print(np.array(l2_lin).mean(), np.array(l2_lin).std())

    with torch.no_grad():
        m_conv1, m_conv2, m_linear = [], [], []
        for i in range(len(weights)):
            m_conv1.append(weights[i]['conv1.0.weight'])
            m_conv2.append(weights[i]['conv2.0.weight'])
            m_linear.append(weights[i]['linear.weight'])
        m_conv1 = torch.stack(m_conv1)
        m_conv2 = torch.stack(m_conv2)
        m_linear = torch.stack(m_linear)
        print(m_conv1.shape)
        print(m_conv2.shape)
        print(m_linear.shape)

        ml2_c1, ml2_c2, ml2_lin = [], [], []
        for i in range(len(weights)):
            ml2_c1.append(np.linalg.norm(m_conv1[i]))
            ml2_c2.append(np.linalg.norm(m_conv2[i]))
            ml2_lin.append(np.linalg.norm(m_linear[i]))
        print(np.array(ml2_c1).mean(), np.array(ml2_c1).std())
        print(np.array(ml2_c2).mean(), np.array(ml2_c2).std())
        print(np.array(ml2_lin).mean(), np.array(ml2_lin).std())
    import matplotlib.mlab as mlab

    l2_c1 = np.array(l2_c1)
    n, bins, patches = plt.hist(l2_c1,
                                25,
                                density=True,
                                facecolor='blue',
                                ec='black',
                                alpha=0.5,
                                label='HyperGAN')

    ml2_c1 = np.array(ml2_c1)
    n, bins, patches = plt.hist(ml2_c1,
                                25,
                                density=True,
                                facecolor='red',
                                ec='black',
                                alpha=0.5,
                                label='standard')
    plt.legend(loc='best')
    plt.ylabel('Frequency')
    plt.xlabel('2-norm value')
    plt.grid(True)
    plt.title('Conv1 L2 norm')
    plt.show()

    return
    print(m1_conv.shape, m1_linear.mean(0).shape)

    l1_conv = (m1_conv - m2_conv).abs().sum()
    l1_linear = (m1_linear - m2_linear).abs().sum()
    print("\nL1 Dist: {} - {}".format(l1_conv, l1_linear))

    l2_conv = np.linalg.norm(m1_conv - m2_conv)
    l2_linear = np.linalg.norm(m1_linear - m2_linear)
    print("L2 Dist: {} - {}".format(l2_conv, l2_linear))

    linf_conv = np.max((m1_conv - m2_conv).abs().cpu().numpy())
    linf_linear = np.max((m1_linear - m2_linear).abs().cpu().numpy())
    print("Linf Dist: {} - {}".format(linf_conv, linf_linear))

    cov_m1_m2_conv = np.cov(m1_conv, m2_conv)[0, 1]
    cov_m1_m2_linear = np.cov(m1_linear, m2_linear)[0, 1]
    print("Cov m1-m2: {} - {}".format(cov_m1_m2_conv, cov_m1_m2_linear))

    cov_m1_conv_linear = np.cov(m1_conv, m1_linear.mean(0))[0, 1]
    cov_m2_conv_linear = np.cov(m2_conv, m2_linear.mean(0))[0, 1]
    print("Cov m1-conv-linear: {} - {}\n\n".format(cov_m1_conv_linear,
                                                   cov_m2_conv_linear))

    return
    utils.plot_histogram(
        [m1_conv.view(-1).cpu().numpy(),
         m2_conv.view(-1).cpu().numpy()],
        save=False)
    utils.plot_histogram(
        [m1_linear.view(-1).cpu().numpy(),
         m2_linear.view(-1).cpu().numpy()],
        save=False)

    for l, name in [(m1_conv, 'conv1.0'), (m1_linear, 'linear')]:
        params = l.cpu().numpy()
        save_dir = 'params/mnist/{}/{}'.format(args.net, name)
        if not os.path.exists(save_dir):
            print("making ", save_dir)
            os.makedirs(save_dir)
        path = '{}/{}_{}.npy'.format(save_dir, name, i)
        print(i)
        print('saving param size: ', params.shape, 'to ', path)
        np.save(path, params)