Example #1
def runClient(host, port, clients):
    """Runs the Java ThreadClient with `clients` threads against a server
    running on host:port. Returns a (throughput, average latency, median
    latency, latency confidence half-width) tuple; throughput is in msgs/s
    and latencies are in microseconds."""

    # CLIENT is the base command tuple (the java invocation), defined elsewhere
    latency_out = tempfile.NamedTemporaryFile()
    child = subprocess.Popen(CLIENT + (host, str(port), str(clients), latency_out.name), stdout=subprocess.PIPE)
    output = child.stdout.read()
    error = child.wait()
    assert error == 0

    latency_f = open(latency_out.name)
    latencies = [int(l) for l in latency_f]
    latency_f.close()
    latency_out.close()

    average_latency, median_latency, stddev, min_latency, max_latency, latency_confidence = statistics.stats(latencies)

    parts = output.split()
    assert parts[-1] == "us"
    assert parts[-5] == "msgs/s"
    throughput = float(parts[-6])

    return throughput, average_latency, median_latency, latency_confidence
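A hypothetical driver for the example above; "localhost", 8080, and the client counts are made up for illustration, and CLIENT must already be defined as the base command tuple:

# Hypothetical usage: sweep client counts against a local server.
for clients in (1, 2, 4, 8):
    throughput, avg_lat, med_lat, conf = runClient("localhost", 8080, clients)
    print("%d clients: %.1f msgs/s, avg latency %s us (+/- %s)"
          % (clients, throughput, avg_lat, conf))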
Example #2
    def run(self, wantpdf=False):
        self.enqueue(Arrival(self))

        self.start_loop()

        sys.stdout.write("[%8s] %4d rate, %6d blocks, %6d arrivals [%.04f]\r" % (
            self.methods[self.method],
            1.0 / self.lambda_arr, self.blocks, self.arrivals, float(self.blocks) / float(self.arrivals)
        ))

        if wantpdf:
            sys.stdout.write('\n')

        sys.stdout.flush()

        while not wantpdf:
            avg, median, std, min_i, max_i, conf = stats(self.interferences)

            if conf >= 0.05:
                self.iterations *= 2

                sys.stdout.write("Doubling iterations %d\r" % self.iterations)
                sys.stdout.flush()
                self.start_loop()
            else:
                sys.stdout.write("[%8s] %4d rate, %.6f interference, %6d blocks, %6d arrivals [%.04f]\r" % (
                    self.methods[self.method],
                    1.0 / self.lambda_arr, avg, self.blocks, self.arrivals, float(self.blocks) / float(self.arrivals)
                ))
                sys.stdout.write('\n')
                sys.stdout.flush()
                return (avg, median, std, min_i, max_i, conf)

        # If we get here we are only interested in the pdf,
        # so just return the results.

        return (self.distances, self.interferences)
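The loop above is a confidence-driven stopping rule: while the 95% half-width of the interference samples is still at or above 0.05, it doubles the iteration count and re-runs. The same pattern in isolation, as a sketch (run_once is a hypothetical stand-in for one simulation batch, and stats() is the six-tuple helper sketched under Example #3):

# Sketch of the CI-driven stopping rule; run_once() is hypothetical and is
# expected to append new measurements to `samples`.
def sample_until_tight(run_once, samples, target=0.05, iterations=1000):
    run_once(iterations, samples)
    while True:
        avg, median, std, lo, hi, conf = stats(samples)
        if conf < target:
            return avg, conf
        iterations *= 2  # CI half-width shrinks ~1/sqrt(n), so double and retry
        run_once(iterations, samples)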
Example #3
def mean_confidence_interval(data, confidence=0.95):
    mean, _, _, _, _, h = statistics.stats(data, 1-confidence)
    return mean, h
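Every example on this page unpacks the same six-tuple from a project-local statistics.stats(data[, alpha]) helper. The module itself is not shown; the sketch below is an assumed, compatible implementation (t-based half-width via SciPy), not the original code:

# Assumed sketch of a statistics.stats()-compatible helper: returns
# (mean, median, stddev, min, max, confidence) where confidence is the
# half-width of the (1 - alpha) t-interval for the mean.
import numpy as np
from scipy import stats as scipy_stats

def stats(data, alpha=0.05):
    a = np.asarray(data, dtype=float)
    n = len(a)
    mean = a.mean()
    stddev = a.std(ddof=1) if n > 1 else 0.0
    half_width = 0.0
    if n > 1:
        t = scipy_stats.t.ppf(1.0 - alpha / 2.0, n - 1)
        half_width = t * stddev / np.sqrt(n)
    return mean, np.median(a), stddev, a.min(), a.max(), half_width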
Example #4
    input = server_type + ".csv"
    data = open(input)
    reader = csv.reader(data)
    table = list(reader)
    data.close()

    scatter = [('Simultaneous Clients', 'Messages/s')]
    average = [('Simultaneous Clients', server_type, '-95% confidence', '+95% confidence')]
    for row in table:
        x = row[0]
        for y in row[1:]:
            scatter.append((x, y))

        stats = statistics.stats([float(f) for f in row[1:]])
        average.append((x, stats[0], stats[0]-stats[-1], stats[0]+stats[-1]))

    options = {
        'plottype': 'points',
        'key': False,
        'ylabel': scatter[0][1],
    }
    stupidplot.gnuplotTable(scatter, server_type + "-scatter.eps", options)

    averages.append(average)

options = {
    #~ 'plottype': 'points',
    #~ 'key': False,
    'ylabel': 'Messages/s',
Example #5
def plot_stacked_bar2(index1, stats, alg, xlabel):
    # assumes module-level plotting settings: f_size, bar_width, opacity,
    # my_color, and the output directory `path`
    out_label = [5, 10, 20, 40, 60, 80, 100, 120]
    n_groups = len(out_label)
    fig = plt.figure(index1)
    index = np.arange(n_groups)

    ax = fig.add_axes([0.15, 0.17, 0.8, 0.75])
    plt.gca().yaxis.grid(True)
    plt.gca().xaxis.grid(False)


    plt.xlabel('Period of packet generation (sec)', fontsize=f_size - 6)
    plt.ylabel("Avg Cells per SlotFrame", fontsize=f_size - 6)

    CellType = ('Shared', 'TX', 'RX', 'Used TX', 'Used RX')
    CellTypeB = ('TX', 'RX')

    metrics = ['Alloc_Cells', 'Used_Cells']
    # x-axis configuration: keep only the generation periods we plot
    x = [v for v in xlabel if v in out_label]

    plt.xticks(x, out_label, fontsize=f_size - 8, rotation='vertical')
    plt.yticks(fontsize=f_size - 8)

    y_avg = []
    y_conf = []
    st = []

    # one average/confidence series per cell type
    for i in range(0, 5):
        y_avg.append([])
        y_conf.append([])

    z = 0
    for j in range(0, len(xlabel)):
        if xlabel[j] in out_label:
            # five sample lists per plotted x value, one per cell type
            st.append([[] for _ in range(5)])

            for v in range(0, len(metrics)):
                for k in range(0, len(stats[alg][xlabel[j]][metrics[v]])):
                    for cell_i in range(0, len(stats[alg][xlabel[j]][metrics[v]][k])):
                        st[z][v * 3 + cell_i].append(stats[alg][xlabel[j]][metrics[v]][k][cell_i])

            z += 1


    for i in range(0, len(out_label)):
        for j in range(0, 5):
            # 95% confidence half-width (alpha = 0.05) for each stacked segment
            average, median, standard_deviation, minimum, maximum, confidence = statistics.stats(st[i][j], 0.05)
            y_conf[j].append(confidence)
            y_avg[j].append(average)

    plt.grid(linestyle='dashed', linewidth=0.5, axis='y')
    error_config = {'elinewidth': 0.1}  # note: defined but unused below ('ecolor': '0.3')

    dataset1 = np.array(y_avg[0])
    dataset2 = np.array(y_avg[1])
    dataset3 = np.array(y_avg[2])
    # Used Cells
    dataset4 = np.array(y_avg[3])
    dataset5 = np.array(y_avg[4])

    p1 = plt.bar(index + 0.32 + bar_width, y_avg[0], bar_width, alpha=opacity, color=my_color[0],
                 error_kw=dict(lw=0.5, capsize=3, capthick=0.5, color='0.3'), label=CellType[0],
                 linewidth=1, edgecolor='grey', yerr=y_conf[0])

    p2 = plt.bar(index + 0.32 + bar_width, y_avg[1], bar_width, alpha=opacity, color=my_color[1],
                 bottom=dataset1,
                 error_kw=dict(lw=0.5, capsize=3, capthick=0.5, color='0.3'), label=CellType[1],
                 linewidth=1, edgecolor='grey', yerr=y_conf[1])

    p3 = plt.bar(index + 0.32 + bar_width, y_avg[2], bar_width, alpha=opacity, color=my_color[2],
                 bottom=dataset1 + dataset2,
                 error_kw=dict(lw=0.5, capsize=3, capthick=0.5, color='0.3'), label=CellType[2],
                 linewidth=1, edgecolor='grey', yerr=y_conf[2])

    # --- Used Cells ---

    p4 = plt.bar(index + 0.32 + 2 * bar_width + 0.05, y_avg[3], bar_width, alpha=opacity, color=my_color[1],
                 error_kw=dict(lw=0.5, capsize=3, capthick=0.5, color='0.3'), label=CellType[3],
                 linewidth=1, edgecolor='grey', yerr=y_conf[3], hatch="//")

    p5 = plt.bar(index + 0.32 + 2 * bar_width + 0.05, y_avg[4], bar_width, alpha=opacity, color=my_color[2],
                 bottom=dataset4,
                 error_kw=dict(lw=0.5, capsize=3, capthick=0.5, color='0.3'), label=CellType[4],
                 linewidth=1, edgecolor='grey', yerr=y_conf[4], hatch="//")

    plt.tick_params(axis='y', which='major', labelsize=13)
    plt.tick_params(axis='x', which='major', labelsize=11)
    plt.yticks(fontsize=f_size - 6)
    plt.xticks(fontsize=f_size - 6)

    plt.title(alg)
    plt.legend(loc='upper right', prop={'size': 10})

    plt.xticks(index + 0.32 + bar_width, out_label, fontsize=f_size - 6)
    locs, labels = plt.xticks()

    plt.savefig(path + "Stacked_bar" + "_" + str(alg) + str(index1) + '.png', format='png')
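Stripped of the styling, the five plt.bar calls above are matplotlib's stacked-bar pattern: bottom= offsets each series by the ones below it, and yerr= draws the confidence half-widths as error bars. A minimal self-contained sketch with synthetic data:

# Minimal stacked bars with error bars (synthetic numbers, not the
# SlotFrame results above).
import numpy as np
import matplotlib.pyplot as plt

idx = np.arange(3)
tx = np.array([2.0, 3.0, 4.0])
rx = np.array([1.0, 1.5, 2.0])
plt.bar(idx, tx, 0.4, yerr=[0.2, 0.3, 0.2], label='TX')
plt.bar(idx, rx, 0.4, bottom=tx, yerr=[0.1, 0.2, 0.1], label='RX')  # stacked on tx
plt.xticks(idx, ['5', '10', '20'])
plt.legend()
plt.savefig('stacked_demo.png')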
Example #6
def read_values( file ):
    print "Reading file %s..."%file
    f=open(file,"r")
    xvalues = []
    yvalues = []
    errvalues = []
    values = {}  # mapping "x" values to a set of "y" values { x : { y : [...] , y: [...] },... }
    header = None
    
    try:
        for line in f.readlines():
            line = line.strip()
            
            # consume the header line first, if requested
            if options.header and header is None:
                header = line.split(options.delimiter)
                # map each header name to its column index
                header = dict(itertools.izip(header, xrange(len(header))))
                print header
                continue
            
            # skip comments
            if line.startswith("#"):
                continue
            
            l = line.split(",")  # note: data rows always split on "," even when options.delimiter differs
            
            # first, read the x-value
            if header and options.x_pos in header:
                x = float(l[ header[options.x_pos] ] )
            else:
                x = float(l[ int(options.x_pos)] )
                    
            if options.normx:
                x=x/float(l[options.normx])
            
            if x not in values:
                values[x] = {}
                print "x:",x
                    
            # now, match the x-value to the needed y-values
            if options.y_pos:
                cols = options.y_pos.split(",")
                # for each y-value we expect two values - the location and the action..for now, we do not really care about the action
                for i in xrange( 0,len(cols),2 ):
                    if header and cols[i] in header:
                        pos = header[cols[i]]
                        
                    else:
                        pos = int(cols[i]) 
                        
                    y = float(l[ pos ] )
                    
                    # note: the miny check only gates normalization here;
                    # values below options.miny are still appended regardless
                    if not options.miny or y >= options.miny:
                        if options.normy:
                            y = y / float(l[options.normy])

                    if cols[i] not in values[x]:
                        values[x][cols[i]] = []
                        
                    # add the value to the right place
                    values[x][cols[i]].append( y )
                    
    except:
        # note: this wraps the whole loop, so one malformed line aborts the read
        print "bad line."
            
    print "done."

    # Now apply the operations: for each y column, reduce the list of matched
    # values to a single number so each "y" element can be plotted.
    cols = options.y_pos.split(",")
    for i in xrange( 0,len(cols),2 ):
        if header and cols[i] in header:
            pos = header[cols[i]]
        else:
            pos = int(cols[i]) 
            
        op = cols[i+1]
        
        for x in sorted(values.keys()):
            old_values = values[x][cols[i]]
            average, median, standard_deviation, minimum, maximum, confidence = statistics.stats( numpy.array(old_values) )
            if op=="avg":
                new_value = average
                values[x][cols[i]+"_std"] = standard_deviation
                values[x][cols[i]+"_ci"] = confidence
            elif op=="normal":
                new_value = old_values[0]
            elif op=="max":
                new_value = maximum
            elif op=="min":
                new_value = minimum
                
            # set the new (single) value for that given x and y_pos
            values[x][cols[i]] = new_value
    
    return values
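The reduce step at the end of read_values boils down to: map each x to a list of y samples, then collapse each list to one number. The same idea in isolation, using a stats() helper with the six-tuple contract sketched under Example #3:

# The core aggregation in isolation (made-up numbers).
values = {1.0: [3.0, 4.0, 5.0], 2.0: [6.0, 7.0]}
for x in sorted(values):
    avg, median, std, lo, hi, conf = stats(values[x])
    print("x=%s avg=%.2f +/- %.2f" % (x, avg, conf))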
Example #7
def learn(env,
          policy_func,
          reward_giver,
          expert_dataset,
          rank,
          pretrained,
          pretrained_weight,
          *,
          g_step,
          d_step,
          entcoeff,
          save_per_iter,
          ckpt_dir,
          log_dir,
          timesteps_per_batch,
          task_name,
          gamma,
          lam,
          max_kl,
          cg_iters,
          cg_damping=1e-2,
          vf_stepsize=3e-4,
          d_stepsize=3e-4,
          vf_iters=3,
          max_timesteps=0,
          max_episodes=0,
          max_iters=0,
          callback=None):

    nworkers = MPI.COMM_WORLD.Get_size()
    rank = MPI.COMM_WORLD.Get_rank()
    np.set_printoptions(precision=3)
    # Setup losses and stuff
    # ----------------------------------------
    ob_space = env.observation_space
    ac_space = env.action_space
    pi = policy_func("pi",
                     ob_space,
                     ac_space,
                     reuse=(pretrained_weight is not None))
    oldpi = policy_func("oldpi", ob_space, ac_space)
    atarg = tf.placeholder(
        dtype=tf.float32,
        shape=[None])  # Target advantage function (if applicable)
    ret = tf.placeholder(dtype=tf.float32, shape=[None])  # Empirical return

    ob = U.get_placeholder_cached(name="ob")
    ac = pi.pdtype.sample_placeholder([None])

    kloldnew = oldpi.pd.kl(pi.pd)
    ent = pi.pd.entropy()
    meankl = tf.reduce_mean(kloldnew)
    meanent = tf.reduce_mean(ent)
    entbonus = entcoeff * meanent

    vferr = tf.reduce_mean(tf.square(pi.vpred - ret))

    ratio = tf.exp(pi.pd.logp(ac) -
                   oldpi.pd.logp(ac))  # advantage * pnew / pold
    surrgain = tf.reduce_mean(ratio * atarg)

    optimgain = surrgain + entbonus
    losses = [optimgain, meankl, entbonus, surrgain, meanent]
    loss_names = ["optimgain", "meankl", "entloss", "surrgain", "entropy"]

    dist = meankl

    all_var_list = pi.get_trainable_variables()
    var_list = [
        v for v in all_var_list
        if v.name.startswith("pi/pol") or v.name.startswith("pi/logstd")
    ]
    vf_var_list = [v for v in all_var_list if v.name.startswith("pi/vff")]
    assert len(var_list) == len(vf_var_list) + 1
    d_adam = MpiAdam(reward_giver.get_trainable_variables())
    vfadam = MpiAdam(vf_var_list)

    get_flat = U.GetFlat(var_list)
    set_from_flat = U.SetFromFlat(var_list)
    klgrads = tf.gradients(dist, var_list)
    flat_tangent = tf.placeholder(dtype=tf.float32,
                                  shape=[None],
                                  name="flat_tan")
    shapes = [var.get_shape().as_list() for var in var_list]
    start = 0
    tangents = []
    for shape in shapes:
        sz = U.intprod(shape)
        tangents.append(tf.reshape(flat_tangent[start:start + sz], shape))
        start += sz
    gvp = tf.add_n([
        tf.reduce_sum(g * tangent)
        for (g, tangent) in zipsame(klgrads, tangents)
    ])  # pylint: disable=E1111
    fvp = U.flatgrad(gvp, var_list)

    assign_old_eq_new = U.function(
        [], [],
        updates=[
            tf.assign(oldv, newv)
            for (oldv,
                 newv) in zipsame(oldpi.get_variables(), pi.get_variables())
        ])
    compute_losses = U.function([ob, ac, atarg], losses)
    compute_lossandgrad = U.function([ob, ac, atarg], losses +
                                     [U.flatgrad(optimgain, var_list)])
    compute_fvp = U.function([flat_tangent, ob, ac, atarg], fvp)
    compute_vflossandgrad = U.function([ob, ret],
                                       U.flatgrad(vferr, vf_var_list))

    @contextmanager
    def timed(msg):
        if rank == 0:
            print(colorize(msg, color='magenta'))
            tstart = time.time()
            yield
            print(
                colorize("done in %.3f seconds" % (time.time() - tstart),
                         color='magenta'))
        else:
            yield

    def allmean(x):
        assert isinstance(x, np.ndarray)
        out = np.empty_like(x)
        MPI.COMM_WORLD.Allreduce(x, out, op=MPI.SUM)
        out /= nworkers
        return out

    U.initialize()
    th_init = get_flat()
    MPI.COMM_WORLD.Bcast(th_init, root=0)
    set_from_flat(th_init)
    d_adam.sync()
    vfadam.sync()
    if rank == 0:
        print("Init param sum", th_init.sum(), flush=True)

    # Prepare for rollouts
    # ----------------------------------------
    seg_gen = traj_segment_generator(pi,
                                     env,
                                     reward_giver,
                                     timesteps_per_batch,
                                     stochastic=True)

    episodes_so_far = 0
    timesteps_so_far = 0
    iters_so_far = 0
    tstart = time.time()
    lenbuffer = deque(maxlen=40)  # rolling buffer for episode lengths
    rewbuffer = deque(maxlen=40)  # rolling buffer for episode rewards
    true_rewbuffer = deque(maxlen=40)

    assert sum([max_iters > 0, max_timesteps > 0, max_episodes > 0]) == 1

    # note: stats() here is a logging helper built from metric names, not the
    # confidence-interval helper used in the other examples on this page
    g_loss_stats = stats(loss_names)
    d_loss_stats = stats(reward_giver.loss_name)
    ep_stats = stats(["True_rewards", "Rewards", "Episode_length"])
    # if provide pretrained weight
    if pretrained_weight is not None:
        U.load_state(pretrained_weight, var_list=pi.get_variables())

    while True:
        if callback: callback(locals(), globals())
        if max_timesteps and timesteps_so_far >= max_timesteps:
            break
        elif max_episodes and episodes_so_far >= max_episodes:
            break
        elif max_iters and iters_so_far >= max_iters:
            break

        # Save model
        if rank == 0 and iters_so_far % save_per_iter == 0 and ckpt_dir is not None:
            fname = os.path.join(ckpt_dir, task_name)
            os.makedirs(os.path.dirname(fname), exist_ok=True)
            saver = tf.train.Saver()
            saver.save(tf.get_default_session(), fname)

        logger.log("********** Iteration %i ************" % iters_so_far)

        # global flag_render
        # if iters_so_far > 0 and iters_so_far % 10 ==0:
        #     flag_render = True
        # else:
        #     flag_render = False

        def fisher_vector_product(p):
            return allmean(compute_fvp(p, *fvpargs)) + cg_damping * p

        # ------------------ Update G ------------------
        logger.log("Optimizing Policy...")
        for _ in range(g_step):
            with timed("sampling"):
                seg = seg_gen.__next__()
            print('rewards', seg['rew'])
            add_vtarg_and_adv(seg, gamma, lam)
            # ob, ac, atarg, ret, td1ret = map(np.concatenate, (obs, acs, atargs, rets, td1rets))
            ob, ac, atarg, tdlamret = seg["ob"], seg["ac"], seg["adv"], seg[
                "tdlamret"]
            vpredbefore = seg[
                "vpred"]  # predicted value function before update
            atarg = (atarg - atarg.mean()) / atarg.std(
            )  # standardized advantage function estimate

            if hasattr(pi, "ob_rms"):
                pi.ob_rms.update(ob)  # update running mean/std for policy

            args = seg["ob"], seg["ac"], atarg
            fvpargs = [arr[::5] for arr in args]

            assign_old_eq_new(
            )  # set old parameter values to new parameter values
            with timed("computegrad"):
                *lossbefore, g = compute_lossandgrad(*args)
            lossbefore = allmean(np.array(lossbefore))
            g = allmean(g)
            if np.allclose(g, 0):
                logger.log("Got zero gradient. not updating")
            else:
                with timed("cg"):
                    stepdir = cg(fisher_vector_product,
                                 g,
                                 cg_iters=cg_iters,
                                 verbose=rank == 0)
                assert np.isfinite(stepdir).all()
                shs = .5 * stepdir.dot(fisher_vector_product(stepdir))
                lm = np.sqrt(shs / max_kl)
                # logger.log("lagrange multiplier:", lm, "gnorm:", np.linalg.norm(g))
                fullstep = stepdir / lm
                expectedimprove = g.dot(fullstep)
                surrbefore = lossbefore[0]
                stepsize = 1.0
                thbefore = get_flat()
                for _ in range(10):
                    thnew = thbefore + fullstep * stepsize
                    set_from_flat(thnew)
                    meanlosses = surr, kl, *_ = allmean(
                        np.array(compute_losses(*args)))
                    improve = surr - surrbefore
                    logger.log("Expected: %.3f Actual: %.3f" %
                               (expectedimprove, improve))
                    if not np.isfinite(meanlosses).all():
                        logger.log("Got non-finite value of losses -- bad!")
                    elif kl > max_kl * 1.5:
                        logger.log("violated KL constraint. shrinking step.")
                    elif improve < 0:
                        logger.log("surrogate didn't improve. shrinking step.")
                    else:
                        logger.log("Stepsize OK!")
                        break
                    stepsize *= .5
                else:
                    logger.log("couldn't compute a good step")
                    set_from_flat(thbefore)
                if nworkers > 1 and iters_so_far % 20 == 0:
                    paramsums = MPI.COMM_WORLD.allgather(
                        (thnew.sum(),
                         vfadam.getflat().sum()))  # list of tuples
                    assert all(
                        np.allclose(ps, paramsums[0]) for ps in paramsums[1:])
            with timed("vf"):
                for _ in range(vf_iters):
                    for (mbob, mbret) in dataset.iterbatches(
                        (seg["ob"], seg["tdlamret"]),
                            include_final_partial_batch=False,
                            batch_size=128):
                        if hasattr(pi, "ob_rms"):
                            pi.ob_rms.update(
                                mbob)  # update running mean/std for policy
                        g = allmean(compute_vflossandgrad(mbob, mbret))
                        vfadam.update(g, vf_stepsize)

        g_losses = meanlosses
        for (lossname, lossval) in zip(loss_names, meanlosses):
            logger.record_tabular(lossname, lossval)
        logger.record_tabular("ev_tdlam_before",
                              explained_variance(vpredbefore, tdlamret))
        # ------------------ Update D ------------------
        logger.log("Optimizing Discriminator...")
        logger.log(fmt_row(13, reward_giver.loss_name))
        ob_expert, ac_expert = expert_dataset.get_next_batch(len(ob))
        batch_size = len(ob) // d_step
        d_losses = [
        ]  # list of tuples, each of which gives the loss for a minibatch
        for ob_batch, ac_batch in dataset.iterbatches(
            (ob, ac), include_final_partial_batch=False,
                batch_size=batch_size):
            ob_expert, ac_expert = expert_dataset.get_next_batch(len(ob_batch))
            # update running mean/std for reward_giver
            if hasattr(reward_giver, "obs_rms"):
                reward_giver.obs_rms.update(
                    np.concatenate((ob_batch, ob_expert), 0))
            *newlosses, g = reward_giver.lossandgrad(ob_batch, ac_batch,
                                                     ob_expert, ac_expert)
            d_adam.update(allmean(g), d_stepsize)
            d_losses.append(newlosses)
        logger.log(fmt_row(13, np.mean(d_losses, axis=0)))

        lrlocal = (seg["ep_lens"], seg["ep_rets"], seg["ep_true_rets"]
                   )  # local values
        listoflrpairs = MPI.COMM_WORLD.allgather(lrlocal)  # list of tuples
        lens, rews, true_rets = map(flatten_lists, zip(*listoflrpairs))
        true_rewbuffer.extend(true_rets)
        lenbuffer.extend(lens)
        rewbuffer.extend(rews)

        logger.record_tabular("EpLenMean", np.mean(lenbuffer))
        logger.record_tabular("EpRewMean", np.mean(rewbuffer))
        logger.record_tabular("EpTrueRewMean", np.mean(true_rewbuffer))
        logger.record_tabular("EpThisIter", len(lens))
        episodes_so_far += len(lens)
        timesteps_so_far += sum(lens)
        iters_so_far += 1

        logger.record_tabular("EpisodesSoFar", episodes_so_far)
        logger.record_tabular("TimestepsSoFar", timesteps_so_far)
        logger.record_tabular("TimeElapsed", time.time() - tstart)

        if rank == 0:
            logger.dump_tabular()
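The line search above follows TRPO: conjugate gradient produces stepdir, shs = 0.5 * stepdir.dot(H @ stepdir) is the quadratic KL estimate of a unit step, and dividing by lm = sqrt(shs / max_kl) rescales the step so that estimate equals max_kl. A compact restatement of just that scaling:

# Restates the step scaling from the update loop above; H is the Fisher
# matrix, applied implicitly via fisher_vector_product.
import numpy as np

def scale_step(stepdir, shs, max_kl):
    lm = np.sqrt(shs / max_kl)  # the Lagrange multiplier
    return stepdir / lm         # fullstep: quadratic KL estimate == max_kl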
Example #8
def calc_student_ttest_result(a, b, confidence):
    result = {}
    #     a = [1,3,5,17,9]
    #     b = [12,4,6,8,10,41]

    result['group_1_N'] = len(a)
    result['group_2_N'] = len(b)
    if len(a) < 2 or len(b) < 2:
        # not enough samples for a t-test: flag every statistic as "-1"
        for key in ('group_1_mean', 'group_1_std', 'group_1_std_error',
                    'group_2_mean', 'group_2_std', 'group_2_std_error',
                    'group_unequal_low', 'group_unequal_up',
                    'group_equal_low', 'group_equal_up',
                    'group_equal_t', 'group_equal_p',
                    'group_equal_free_degree',
                    'group_unequal_t', 'group_unequal_p',
                    'group_unequal_free_degree',
                    'group_equal_mean_error', 'group_unequal_mean_error',
                    'F', 'sig',
                    'group_unequal_std_error', 'group_equal_std_error'):
            result[key] = "-1"
        return result

    mean1, _, stddev1, _, _, _ = statistics.stats(a, confidence)
    result['group_1_mean'] = utils.get_Decimal_float(mean1)
    result['group_1_std'] = utils.get_Decimal_float(stddev1)
    result['group_1_std_error'] = utils.get_Decimal_float(stddev1 /
                                                          math.sqrt(len(a)))

    mean2, _, stddev2, _, _, _ = statistics.stats(b, confidence)
    result['group_2_mean'] = utils.get_Decimal_float(mean2)
    result['group_2_std'] = utils.get_Decimal_float(stddev2)
    result['group_2_std_error'] = utils.get_Decimal_float(stddev2 /
                                                          math.sqrt(len(b)))

    import statsmodels.stats.api as sms
    cm = sms.CompareMeans(sms.DescrStatsW(a), sms.DescrStatsW(b))
    tconfint_diff = cm.tconfint_diff(alpha=1.0 - confidence, usevar='unequal')
    result['group_unequal_low'] = utils.get_Decimal_float(tconfint_diff[0])
    result['group_unequal_up'] = utils.get_Decimal_float(tconfint_diff[1])
    tconfint_diff = cm.tconfint_diff(alpha=1.0 - confidence, usevar='pooled')
    result['group_equal_low'] = utils.get_Decimal_float(tconfint_diff[0])
    result['group_equal_up'] = utils.get_Decimal_float(tconfint_diff[1])
    import statsmodels.api as sm
    ttest_int_result = sm.stats.ttest_ind(a, b, usevar='pooled')
    result['group_equal_t'] = utils.get_Decimal_float(ttest_int_result[0])
    result['group_equal_p'] = utils.get_Decimal_float(ttest_int_result[1])
    result['group_equal_free_degree'] = Decimal(ttest_int_result[2])
    ttest_int_result = sm.stats.ttest_ind(a, b, usevar='unequal')
    result['group_unequal_t'] = utils.get_Decimal_float(ttest_int_result[0])
    result['group_unequal_p'] = utils.get_Decimal_float(ttest_int_result[1])
    result['group_unequal_free_degree'] = utils.get_Decimal_float(
        ttest_int_result[2])
    result['group_equal_mean_error'] = result['group_1_mean'] - result[
        'group_2_mean']
    result['group_unequal_mean_error'] = result['group_1_mean'] - result[
        'group_2_mean']
    from scipy.stats import levene
    ttest_levene = levene(a, b, center='trimmed')
    result['F'] = utils.get_Decimal_float(ttest_levene[0])
    result['sig'] = utils.get_Decimal_float(ttest_levene[1])

    result['group_unequal_std_error'] = utils.get_Decimal_float(
        math.sqrt(stddev1 * stddev1 / len(a) + stddev2 * stddev2 / len(b)))
    # note: the "equal" (pooled) std error reuses the unequal-variance formula
    result['group_equal_std_error'] = utils.get_Decimal_float(
        math.sqrt(stddev1 * stddev1 / len(a) + stddev2 * stddev2 / len(b)))
    return result
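A hypothetical call with made-up samples at 95% confidence:

# Hypothetical usage; group_a and group_b are invented data.
group_a = [1.2, 1.4, 1.1, 1.5, 1.3]
group_b = [1.6, 1.8, 1.7, 1.9]
result = calc_student_ttest_result(group_a, group_b, 0.95)
print(result['group_equal_t'], result['group_equal_p'])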
Example #9
# note: ad_giant, data, values() and frac() are defined earlier in the original script
ad_sc = [x.split(",")[0] for x in open("./method3/csv/adherent_sc_pwl.csv").readlines()]
ad_not_net = [x.strip().split(",")[0] for x in open("./method3/csv/adherent_not_networked_pwl.csv").readlines()]

nonad = [x.split(",")[0] for x in open("./master_nonadherent.csv").readlines()[1:]]

nonad_giant = [x.split(",")[0] for x in open("./method3/csv/nonadherent_giant_pwl.csv").readlines()]
nonad_sc = [x.split(",")[0] for x in open("./method3/csv/nonadherent_sc_pwl.csv").readlines()]
nonad_not_net = [x.strip().split(",")[0] for x in open("./method3/csv/nonadherent_not_networked_pwl.csv").readlines()]


#check sizes sections

print "ad_giant, ad_sc, ad_not_net", len(ad_giant), len(ad_sc), len(ad_not_net)
print "nonad_giant, nonad_sc, nonad_not_net", len(nonad_giant), len(nonad_sc), len(nonad_not_net)

mean0, median0, stddev0, min0, max0, confidence0 = statistics.stats(values(data, nonad_not_net))
mean1, median1, stddev1, min1, max1, confidence1 = statistics.stats(values(data, nonad_sc))
mean2, median2, stddev2, min2, max2, confidence2 = statistics.stats(values(data, nonad_giant))
mean3, median3, stddev3, min3, max3, confidence3 = statistics.stats(values(data, ad_not_net))
mean4, median4, stddev4, min4, max4, confidence4 = statistics.stats(values(data, ad_sc))
mean5, median5, stddev5, min5, max5, confidence5 = statistics.stats(values(data, ad_giant))

print "means and 95% CIs for (a) and (b) panels"
print -mean0, confidence0, len(nonad_not_net)
print -mean1, confidence1, len(nonad_sc)
print -mean2, confidence2, len(nonad_giant)
print -mean3, confidence3, len(ad_not_net)
print -mean4, confidence4, len(ad_sc)
print -mean5, confidence5, len(ad_giant)

non_ad_not_net_frac = frac(list(nonad_not_net))
Example #10
# note: the not_networked_*, overweight_* and obese_* lists come from earlier
# in the original script
obj = bootstrapmeans([
    list(not_networked_overweight),
    list(overweight_sc),
    list(overweight_giant)
])
d = obj.hypo()

obj = bootstrapmeans(
    [list(not_networked_obese),
     list(obese_sc),
     list(obese_giant)])
d = obj.hypo()

#calculate stats for each set of values
mean0, median0, stddev0, min0, max0, confidence0 = statistics.stats(
    values(data, not_networked_overweight))
mean1, median1, stddev1, min1, max1, confidence1 = statistics.stats(
    values(data, overweight_sc))
mean2, median2, stddev2, min2, max2, confidence2 = statistics.stats(
    values(data, overweight_giant))
mean3, median3, stddev3, min3, max3, confidence3 = statistics.stats(
    values(data, not_networked_obese))
mean4, median4, stddev4, min4, max4, confidence4 = statistics.stats(
    values(data, obese_sc))
mean5, median5, stddev5, min5, max5, confidence5 = statistics.stats(
    values(data, obese_giant))

not_networked_overweight_frac = frac(not_networked_overweight)
overweight_sc_frac = frac(overweight_sc)
overweight_giant_frac = frac(overweight_giant)
Example #11
print ab
print 'std: %.5f' % numpy.std(dataset)
cd = numpy.std(dataset) / math.sqrt(len(dataset))
print 'error : %.5f' % cd
std = dataset.std()
print std
mean = dataset.mean()
print "std:%.5f" % std
print "mean:%.5f" % mean

interval = stats.t.interval(0.95, len(dataset) - 1, 5, std)  # note: scale here is std, not std/sqrt(n)
print interval

ci = stats.norm.interval(0.95, loc=mean, scale=std)
#print ['%f' % mean, '%f' % std, '(%.3f,%.3f)' % (ci[0], ci[1])]
mean, median, stddev, min_v, max_v, confidence = statistics.stats(dataset, 0.05)
print mean, stddev, min_v, max_v, confidence
print mean-5
print mean + confidence - 5
print mean  - confidence - 5



# 
# def t_test_interval(dataset, percent, expectedVal):
#     std = dataset.std()
#     interval=stats.t.interval(percent, len(dataset)-1, expectedVal, std) 
#     return interval
# 
# def t_test_sample(dataset, percent):
#    mean, median, stddev, min, max, confidence = statistics.stats(dataset, percent)