Example no. 1
def basic_spatial_grid(ctrl):
    ax = graphs.spatial_grid(ctrl, unit='m', s=12)
    ax.set_xlabel('Meters')
    ax.set_ylabel('Meters')
    ax.set_aspect('equal')

    graphs.save('basic_spatial_grid')
    graphs.show()
Example no. 2
def zero_padded_crosscorr():
    fontsize_tmp = graphs.FONTSIZE
    graphs.change_fontsize(15)

    p = pinit64_no_pulse_shape()
    ax = graphs.crosscorr(p, savename='latex_figures/discrete_crosscorr')
    graphs.show()
    graphs.change_fontsize(fontsize_tmp)
    return ax
Example no. 3
def interd_cavg_vs_nodecount():
    # 150 samples per point
    graphs.GRAPH_OUTPUT_FORMAT = 'png'
    graphs.change_fontsize(graphs.FONTSIZE + 2)
    alldates = db.fetch_dates([20160825101515414, 20160825135628487]) # cavg vs nodecount

    dates = [alldates[0], alldates[-1]]
    ax = graphs.scatter_range(dates, ['nodecount', 'good_link_ratio'], color='k')
    ax.set_xlabel("Number of nodes $M$")
    ax.set_ylabel("$C$")
    graphs.save('interd_cavg_vs_nodecount')
    graphs.show()
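Note: db.fetch_dates and graphs.scatter_range are project-local helpers whose implementations are not shown in these examples. As an illustrative sketch only, a stand-alone equivalent that scatters one result column against another for a list of simulation records (the `records` layout and column names here are hypothetical, not the project's actual API) could look like:

import matplotlib.pyplot as plt

def scatter_columns(records, xcol, ycol, color='k'):
    # Minimal stand-in for a scatter_range-style helper.
    # `records` is assumed to be a list of dicts, one per simulation run.
    xs = [r[xcol] for r in records]
    ys = [r[ycol] for r in records]
    fig, ax = plt.subplots()
    ax.scatter(xs, ys, color=color, s=12)
    return ax

# Usage mirroring the example above (data layout is made up):
# ax = scatter_columns(sim_records, 'nodecount', 'good_link_ratio')
# ax.set_xlabel("Number of nodes $M$")
# ax.set_ylabel("$C$")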
Example no. 4
def interd_compare_quiet():
    graphs.GRAPH_OUTPUT_FORMAT = 'png'
    graphs.change_fontsize(graphs.FONTSIZE + 2)
    alldates = db.fetch_dates([20160822172521531, 20160822215536865]) # 960sims quiet compare
    dates = [alldates[0], alldates[-1]]

    labels = []
    labels.append('Random')
    labels.append('Clustering')
    labels.append('Sensing')
    ax = graphs.scatter_range(dates, ['max_dist_from_origin', 'good_link_ratio'], multiplot='quiet_selection', legend_labels=labels)
    ax.set_xlabel("Side of square area (m)")
    ax.set_ylabel("$C$")

    graphs.save('interd_compare_quiet')
    graphs.show()
Example no. 5
def interd_dpll_vs_r12():
    # 150 samples per point
    graphs.GRAPH_OUTPUT_FORMAT = 'png'
    graphs.change_fontsize(graphs.FONTSIZE + 2)
    alldates = db.fetch_dates([20160828200017740, 20160828205450849]) # r12 vs dpll
    dates = [alldates[0], alldates[-1]]


    labels = []
    labels.append('R12')
    labels.append('DPLL')
    ax = graphs.scatter_range(dates, ['nodecount', 'good_link_ratio'], multiplot='peak_detect', legend_labels=labels) 
    ax.set_xlabel("Number of nodes (M)")
    ax.set_ylabel("$C$")
    graphs.save('interd_dpll_vs_r12')
    graphs.show()
Example no. 6
def interd_cavg_vs_distance():
    # 150 samples per point
    graphs.GRAPH_OUTPUT_FORMAT = 'png'
    graphs.change_fontsize(graphs.FONTSIZE + 2)
    alldates = db.fetch_dates([20160825141108531, 20160825183253474]) # cavg vs dist
    dates = [alldates[0], alldates[-1]]

    ax = graphs.scatter_range(dates, ['max_dist_from_origin', 'good_link_ratio'], color='k')
    ax.set_xlabel("Side of square area (Meters)")
    ax.set_ylabel("$C$")
    tick_locs = [500,750,1000,1250,1500]
    tick_lbls = list(map(str, tick_locs))
    ax.set_xticks(tick_locs)
    ax.set_xticklabels(tick_lbls)
    graphs.save('interd_cavg_vs_distance')
    graphs.show()
Example no. 7
def kfold_train(learner_list,
                data,
                targets,
                folds,
                show_hair=True,
                display=True):
    """Trains the list of learners in parallel. PARALLEL NOT IMPLEMENTED YET
    All learners must be objects having the following function signature:
    <learner>.train(x_train, y_train, x_test, y_test, num_epochs, train_batchsize, display=False)

    learner_list: list of tuples: (learner_obj, args, kwargs)

    """
    if not isinstance(learner_list, list):
        raise ValueError(
            'Expected a list of learners as a first argument, even for single learners'
        )

    for l in learner_list:
        all_errors = []
        accuracies = []
        k = 0
        tf = time.perf_counter  # time.clock was removed in Python 3.8
        t0 = tf()
        for x_train, y_train, x_test, y_test in stratified_folds(data,
                                                                 targets,
                                                                 folds=folds):
            if display:
                if k != 0:
                    logger.info('Fold ' + str(k - 1) + ': done in ' + "%2.2f" %
                                (tf() - t0) + ' sec')
                logger.info('Working on <' + type(l[0]).__name__ + '> fold ' +
                            str(k))
                t0 = tf()
            # If x/ydata is a string, assume it is a path and load it
            epochs_errors, test_acc = l[0].train(x_train, y_train, x_test,
                                                 y_test, *l[1], **l[2])
            all_errors.append(epochs_errors)
            accuracies.append(test_acc)
            k += 1

        logger.debug(accuracies)
        if show_hair:
            graphs.train_and_test(all_errors,
                                  legend_labels=('Training folds',
                                                 'Testing folds'))
            graphs.show()
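The docstring of kfold_train fixes the learner interface, but no concrete learner appears in the excerpt. Below is a minimal sketch of an object satisfying that contract, using scikit-learn purely as an illustration; the wrapper class and its internals are assumptions, not part of the original code.

from sklearn.linear_model import LogisticRegression

class SklearnLearner:
    # Toy learner exposing the train() signature expected by kfold_train().
    def __init__(self):
        self.model = LogisticRegression(max_iter=1000)

    def train(self, x_train, y_train, x_test, y_test,
              num_epochs=1, train_batchsize=None, display=False):
        # A plain sklearn fit has no epochs or batches; the extra arguments
        # exist only to match the expected signature.
        self.model.fit(x_train, y_train)
        epochs_errors = [1.0 - self.model.score(x_train, y_train)]
        test_acc = self.model.score(x_test, y_test)
        return epochs_errors, test_acc

# Usage: kfold_train([(SklearnLearner(), (), {})], data, targets, folds=5)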
Example no. 8
def interd_quiet_grids():

    graphs.GRAPH_OUTPUT_FORMAT = 'png'
    def init_simwrap():
        sim = SimWrap(ctrl, p, cdict, pdict)
        sim.set_all_nodisp()
        sim.TO_show_clusters = False
        sim.ctrl.saveall = False
        return sim

    def graph_mods():
        ax.set_xlabel('Meters')
        ax.set_ylabel('Meters')
        ax.set_aspect('equal')


    # Random grid
    graphs.change_fontsize(graphs.FONTSIZE + 2)
    ctrl, p, cdict, pdict = dec_grids()
    ctrl.quiet_selection = 'random'
    ctrl.quiet_nodes = ctrl.nodecount - 5
    ctrl.steps = 1
    sim = init_simwrap()
    with warnings.catch_warnings():
        warnings.filterwarnings("ignore")
        sim.simulate()
    ax = graphs.spatial_grid(ctrl, unit='m', s=12, show_broadcast=True)
    graph_mods()
    graphs.save('interd_grid_random')
    graphs.show()

    # cluster grid
    ctrl, p, cdict, pdict = dec_grids()
    ctrl.quiet_selection = 'kmeans'
    ctrl.quiet_nodes = ctrl.nodecount - 5
    ctrl.steps = 1
    sim = init_simwrap()
    with warnings.catch_warnings():
        warnings.filterwarnings("ignore")
        sim.simulate()
    ax = graphs.spatial_grid(ctrl, unit='m', s=12, show_broadcast=True)
    graph_mods()
    graphs.save('interd_grid_kmeans')
    graphs.show()

    # sensing grid
    ctrl, p, cdict, pdict = dec_grids()
    ctrl.quiet_selection = 'contention'
    ctrl.steps = 40
    sim = SimWrap(ctrl, p, cdict, pdict)
    sim.TO_show_clusters = False
    sim.ctrl.saveall = False
    with warnings.catch_warnings():
        warnings.filterwarnings("ignore")
        sim.simulate()
    ax = graphs.spatial_grid(ctrl, unit='m', s=12, show_broadcast=True)
    graph_mods()
    graphs.save('interd_grid_sensing')
    graphs.show()
Example no. 9
def interd_dpll_final_compare():
    #alldates = db.fetch_dates([20160829122809568, 20160829152128098]) # plain dpll vs contention
    graphs.GRAPH_OUTPUT_FORMAT = 'png'
    graphs.change_fontsize(15)
    alldates = db.fetch_dates([20160828200017740, 20160828205450849]) # r12 vs dpll
    dates = [alldates[0], alldates[-1]]

    # Extra (add to scatter_range)
    #sens_dates = db.fetch_dates([20160829122809568, 20160829152128098]) # plain dpll vs contention
    #new_fetch_dict = {'date':sens_dates, 'quiet_selection':['contention']}
    #raw_data = db.fetch_matching(new_fetch_dict, collist)
    #datalist.append(np.array(raw_data))
    #labels.append('Sensing')

    labels = []
    labels.append('R12')
    labels.append('DPLL')
    labels.append('DPLL-S')
    ax = graphs.scatter_range(dates, ['nodecount', 'good_link_ratio'], multiplot='peak_detect', legend_labels=labels) 
    ax.set_xlabel("Number of nodes (M)")
    ax.set_ylabel("$C$")
    graphs.save('interd_dpll_final_compare')
    graphs.show()
Example no. 10
def ml_full_3d(noise_var=1, fct=lib.ll_redux_2d):
    """Graphs a 3d surface of the specified ML fct"""
    p = ml_pinit_no_pulse_shape()

    points = 10
    t0 = 0
    d0 = 0
    t_halfwidth = 10
    d_halfwidth = 10

    t_min = t0-t_halfwidth
    t_max = t0+t_halfwidth
    d_min = d0-d_halfwidth
    d_max = d0+d_halfwidth

    t_step = t_halfwidth*2/points
    theta_range = np.arange(t_min, t_max, t_step)
    deltaf_range = np.arange(d_min, d_max, d_halfwidth*2/points)*1e-6  # Deltaf in ppm
    #deltaf_range = np.zeros(len(deltaf_range))
    
    loglike = fct(p,0,0,theta_range,deltaf_range, var_w=noise_var)

    #deltaf_range = np.arange(-1,1.1, 0.1)*1e-6
    # Plot preparations
    x = theta_range
    y = deltaf_range*1e6
    z = loglike*1e-3

    # Plot parameters
    fig, ax = graphs.surface3d(x, y, z, density=40)
    ax.set_xlabel('Time offset (samples)')
    ax.set_ylabel('CFO (ppm)')
    ax.set_zlabel('Log likelihood (1e3)')
    graphs.plt.tight_layout()
    
    graphs.show()
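graphs.surface3d is another project helper. As a labelled assumption, a plain-matplotlib equivalent for drawing the log-likelihood surface over the (theta, deltaf) grid might look like the following sketch; the expected shape of z is an assumption, not taken from the original code.

import numpy as np
import matplotlib.pyplot as plt

def surface3d_sketch(x, y, z):
    # Hypothetical stand-in for graphs.surface3d().
    # Assumes z has shape (len(y), len(x)), matching np.meshgrid's default layout.
    # Requires matplotlib >= 3.2 for the projection='3d' shortcut.
    X, Y = np.meshgrid(x, y)
    fig = plt.figure()
    ax = fig.add_subplot(projection='3d')
    ax.plot_surface(X, Y, z, cmap='viridis')
    return fig, ax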
Example no. 11
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import roc_curve

df = pd.read_csv('https://sololearn.com/uploads/files/titanic.csv')
df['male'] = df['Sex'] == 'male'
X = df[[
    'Pclass', 'male', 'Age', 'Siblings/Spouses', 'Parents/Children', 'Fare'
]].values
y = df['Survived'].values

X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=5)

model = LogisticRegression()
model.fit(X_train, y_train)

# Adjusting the threshold
# y_pred = model.predict_proba(X_test)[:, 1] > 0.75
y_pred_proba = model.predict_proba(X_test)
fpr, tpr, thresholds = roc_curve(y_test, y_pred_proba[:, 1])

plt.plot(fpr, tpr)
plt.plot([0, 1], [0, 1], linestyle='--')
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.0])
plt.xlabel('1 - specificity')
plt.ylabel('sensitivity')
plt.show()

# print("sensitivity:", sensitivity_score(y_test, y_pred))
# print("specificity:", specificity_score(y_test, y_pred))
Example no. 12
    nr = ("number of relations", "Number of similarity relations")

    # Similarities at highest in-degree threshold
    s1 = selectedSimilarities(data)[-1]

    pl.init()
    wordSimScatterPlot(s1, ws353, "$1 - L_1$", "WS-353")
    wordSimScatterPlot(s1, mt287, "$1 - L_1$", "MT-287")
    wordSimScatterPlot(s1, mt771, "$1 - L_1$", "MT-771")
    wordSimScatterPlot(s1, simlex999, "$1 - L_1$", "SIMLEX-999")
    level0Plot(data, md, rt)
    level0Plot(data, md, nv)
    level0Plot(data, md, ne)
    histograms(data, "relative l1 norm", "Relative $\mathrm{L_{1}}$ norm")
    histograms(data, "max error", "Error bound")
    plotLevel0VsLevel1Stats(data, "max in-degree", "relative l1 norm",
                            "In-degree threshold",
                            "Relative $\mathrm{L_{1}}$ norm")
    plotLevel0VsLevel1Stats(data, "max in-degree", "max error",
                            "In-degree threshold", "Error bound")
    correlationsVsLevel0Prop(data, "max in-degree", "In-degree threshold",
                             [ws353, mt287, mt771, simlex999],
                             ["WS-353", "MT-287", "MT-771", "SIMLEX-999"])
    if show:
        pl.show()

    # Save figures under the figure path; this will not work if the figures have already been displayed with .show()
    for i in plt.get_fignums():
        plt.figure(i)
        plt.savefig(figurePath + '/' + experiment_name + '-figure%d.pdf' % i)
Example no. 13
def highlited_regimes():
    """Highlights the converging vs the drift regime"""
    compare_fontsize = 15
    compare_aspect=12
    props = dict(boxstyle='round', facecolor='wheat', alpha=0.5)

    fontsize_tmp = graphs.FONTSIZE
    def run_hair_graph(aspect='auto'):
        hair_kwargs = {'y_label':r'$\theta_i$ $(T_0)$', 'show_clusters':False, 'savename':''}
        hair_args = (
                (ctrl.sample_inter , ctrl.theta_inter, ctrl),
                hair_kwargs,
        )
        ax = graphs.hair(*hair_args[0], **hair_args[1])
        ax.set_aspect(aspect)
        return ax

    # DRIFT REGIME
    graphs.change_fontsize(compare_fontsize)
    ctrl, p, cdict, pdict = dec_sample_theta()
    sim = SimWrap(ctrl, p, cdict, pdict)

    sim.conv_min_slope_samples = 15 
    sim.ctrl.keep_intermediate_values = True
    sim.simulate()
    ax = run_hair_graph(aspect=compare_aspect)
    ax.text(0.85, 0.9, 'No DC', transform=ax.transAxes, fontsize=compare_fontsize, verticalalignment='top', bbox=props) 
    xmid = 12
    xmax = ctrl.steps-13
    xmin = 0
    ymin, ymax = ax.get_ylim()
    ya = ymin-0.06
    ax.set_xlim([xmin,xmax])
    ax.set_ylim([ya,ymax])
    fname = 'latex_figures/init_theta'
    graphs.save(fname)
    graphs.show()

    del ax
    graphs.change_fontsize(15)
    ax = run_hair_graph()
    
    # Draw biarrows
    ax.annotate('', xy=(xmid, ya), xycoords='data',
                xytext=(xmin, ya), textcoords='data',
                arrowprops=dict(arrowstyle="<->"))
    ax.annotate('', xy=(xmax, ya), xycoords='data',
                xytext=(xmid, ya), textcoords='data',
                arrowprops=dict(arrowstyle="<->"))
    ax.plot([xmid, xmid], [ya-1, ymax], 'k--')

    # text
    ax.text(xmid/2, ya, 'Transient', ha='center', va='bottom' )
    ax.text((xmax-xmid)/2 +xmid, ya, 'Drifting', ha='center', va='bottom' )

    ax.set_xlim([xmin,xmax])
    ax.set_ylim([ya-0.03,ymax])

    fname = 'latex_figures/highlighted_regimes'
    graphs.save(fname)
    graphs.show()

    # THETA EXAMPLE
    graphs.change_fontsize(compare_fontsize)
    ctrl, p, cdict, pdict = dec_sample_theta()
    ctrl.prop_correction = True
    sim = SimWrap(ctrl, p, cdict, pdict)

    sim.conv_min_slope_samples = 15 
    sim.ctrl.keep_intermediate_values = True
    sim.show_CFO = False
    sim.show_TO = False
    sim.make_cat = False
    sim.simulate()
    ax = run_hair_graph(aspect=compare_aspect)
    ax.text(0.85, 0.9, 'Q = 7', transform=ax.transAxes, fontsize=compare_fontsize, verticalalignment='top', bbox=props) 

    ax.set_xlim([xmin,xmax])
    ax.set_ylim([ya,ymax])

    fname = 'latex_figures/example_theta'
    graphs.save(fname)
    graphs.show()

    # THETA EXAMPLE with Q=14
    graphs.change_fontsize(compare_fontsize)
    ctrl, p, cdict, pdict = dec_sample_theta()
    ctrl.prop_correction = True
    ctrl.pc_b, ctrl.pc_a = lib.hipass_avg(14)
    sim = SimWrap(ctrl, p, cdict, pdict)

    sim.conv_min_slope_samples = 15 
    sim.ctrl.keep_intermediate_values = True
    sim.show_CFO = False
    sim.show_TO = False
    sim.make_cat = False
    sim.simulate()
    ax = run_hair_graph(aspect=compare_aspect)
    ax.text(0.85, 0.9, 'Q = 14', transform=ax.transAxes, fontsize=compare_fontsize, verticalalignment='top', bbox=props) 

    ax.set_xlim([xmin,xmax])
    ax.set_ylim([ya,ymax])

    fname = 'latex_figures/example_theta_q14'
    graphs.save(fname)
    graphs.show()


    
    graphs.change_fontsize(fontsize_tmp)
Example no. 14
def ml_full_one(variable='CFO',noise_var=1):
    """Graphs only 1 axis of the log ML"""
    p = ml_pinit()

    points = 1000
    t0 = 10
    d0 = 0
    t_halfwidth = 100
    d_halfwidth = 1

    t_min = t0-t_halfwidth
    t_max = t0+t_halfwidth
    d_min = d0-d_halfwidth
    d_max = d0+d_halfwidth

    t_step = max(int(round(t_halfwidth*2/points)),1) # int(round()) because integer samples
    theta_range = np.arange(t_min, t_max, t_step) 
    deltaf_range = np.arange(d_min, d_max,d_halfwidth*2/points)*1e-6 # Deltaf in ppm

    
    # x & z preparation
    if variable == 'CFO':
        loglike = lib.loglikelihood_1d_CFO(p,t0,d0,deltaf_range, var_w=noise_var)
        x = deltaf_range*1e6
        xlabel = 'CFO (ppm)'
    elif variable == 'TO':
        loglike = lib.loglikelihood_1d_TO(p,t0,d0,theta_range, var_w=noise_var)
        x = theta_range
        xlabel = 'TO (samples)'
    z = loglike*1e-3

    # Plot parameters
    ax = graphs.continuous(x, z)
    ax.set_xlabel(xlabel)
    ax.set_ylabel('Log likelihood (1e3)')
    graphs.plt.tight_layout()

    
    graphs.show()


    p = ml_pinit()

    points = 40
    t0 = 0
    d0 = 0
    t_halfwidth = 10
    d_halfwidth = 1

    t_min = t0-t_halfwidth
    t_max = t0+t_halfwidth
    d_min = d0-d_halfwidth
    d_max = d0+d_halfwidth

    t_step = t_halfwidth*2/points
    theta_range = np.arange(t_min, t_max, t_step)
    deltaf_range = np.arange(d_min, d_max, d_halfwidth*2/points)*1e-6  # Deltaf in ppm
    #deltaf_range = np.zeros(len(deltaf_range))
    
    loglike = lib.ll_redux_2d(p,0,0,theta_range,deltaf_range, var_w=noise_var)

    #deltaf_range = np.arange(-1,1.1, 0.1)*1e-6
    # Plot preparations
    x = theta_range
    y = deltaf_range*1e6
    z = loglike*1e-3

    # Plot parameters
    fig, ax = graphs.surface3d(x, y, z, density=40)
    ax.set_xlabel('Time offset (samples)')
    ax.set_ylabel('CFO (ppm)')
    ax.set_zlabel('Log likelihood (1e3)')
    graphs.plt.tight_layout()
    
    graphs.show()