Example #1
def squat_down(angle, angles_arr, squat_knee_angle, left_knee_angle, right_knee_angle, left_hip_gap, right_hip_gap):
    global color
    global test
    global count_flag
    global squat_count
    global video_player  # video playback trigger
    # while going down
    x = ""
    if mean(angles_arr[-10:-5]) < mean(angles_arr[-5:]) and angle - angles_arr[0] >= 10:
        if left_knee_angle > squat_knee_angle * 1.2 and right_knee_angle > squat_knee_angle * 1.2:
            test = "Lower"
            color = (0, 0, 250)
        elif (squat_knee_angle <= left_knee_angle < squat_knee_angle * 1.2 or
                squat_knee_angle <= right_knee_angle < squat_knee_angle * 1.2) or \
                (left_hip_gap < 20 or right_hip_gap < 20):
            test = "Good"
            video_player = True  # ready to play the video once fully seated
            count_flag = True
            color = (255, 0, 0)
        x = "down"
        
    # while coming up
    if mean(angles_arr[-10:-5]) > mean(angles_arr[-5:]):
        x = "up"

        # when stopping after going down and coming back up
        if min(angles_arr) * 0.9 < angle < min(angles_arr) * 1.1:  # recognition of fully "standing" seems to allow some slack
            x = 'ready'
            if count_flag:
                video_player = True  # ready to play the video once fully standing
                count_flag = False
                squat_count += 1
                print(f"rep: {squat_count}")
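The rep detection above hinges on comparing the mean of an older window of knee angles against the mean of the newest window. A minimal standalone sketch of that test, with hypothetical per-frame values:

from statistics import mean

angles_arr = [95, 97, 100, 104, 109, 115, 122, 130, 139, 149]  # hypothetical per-frame knee angles

older = mean(angles_arr[-10:-5])  # frames 10..6 ago
newer = mean(angles_arr[-5:])     # the 5 most recent frames
if older < newer:
    print("angle increasing -> squatting down")
elif older > newer:
    print("angle decreasing -> standing up")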
Example #2
def squat_down(angle, angles_arr, squat_knee_angle, left_knee_angle,
               right_knee_angle, left_hip_gap, right_hip_gap, knee_gap):
    global color
    global test
    global count_flag
    global squat_count
    global video_player  # video playback trigger
    global squat_set
    global running
    global rest_flag
    global knee_flag

    # while going down
    if len(angles_arr) > 10:
        if mean(angles_arr[-10:-5]) < mean(
                angles_arr[-5:]) and angle > angles_arr[0] + 5:

            if left_knee_angle > squat_knee_angle * 1.2 and right_knee_angle > squat_knee_angle * 1.2:
                test = "Lower"
                color = (0, 0, 250)

            elif (squat_knee_angle <= left_knee_angle < squat_knee_angle * 1.2 or
                    squat_knee_angle <= right_knee_angle < squat_knee_angle * 1.2) or \
                    (left_hip_gap < 20 or right_hip_gap < 20):
                test = "Good"
                video_player = 0  # ready to play the video once fully seated
                count_flag = True
                color = (255, 0, 0)

            # knees caving in too much
            if knee_gap >= 1.2:
                knee_flag = True

        # while coming up
        if mean(angles_arr[-10:-5]) > mean(angles_arr[-5:]):
            # when stopping after going down and coming back up
            if np.median(angles_arr[:5]) * 0.95 <= angle <= np.median(
                    angles_arr[:5]) * 1.05:  # fine-tuned detail
                if count_flag:
                    video_player = 1  # ready to play the video once fully standing
                    print(angle)
                    count_flag = False
                    squat_count += 1
                    if squat_count - CUSTOM_SQUAT_COUNT == 0:
                        squat_set += 1
                        squat_count = 0

                        running = False
                        rest_flag = True

                    print(f"rep: {squat_count}")

                # knees caving in too much
                if knee_flag:
                    test = "Knee Wider"
                    color = (0, 0, 255)
                    knee_flag = False
Example #3
def do_stuff():
    rolls = np.array(return_med_index(x))
    rolls = rolls.astype(int)
    #print(arr)
    arr.sort()
    s0 = mean(arr)
    s1 = mean(arr[rolls])
    s2 = stats.mode(arr)[0].astype(float)[0]
    print(round(s0, 1))
    print(round(s1, 1))
    print(round(s2, 1))
Example #4
 def filter_CS_candidates(self, vehicles, charging_stations):
     d = great_circle_distance(vehicles.lat.values, vehicles.lon.values,
                               mean(charging_stations[:, 0]),
                               mean(charging_stations[:, 1]))
     within_limit_distance = d < 1e3 * (
         self.reject_distance + self.unit_length *
         (self.k - 1))  # a large enough number
     candidates = vehicles.index[within_limit_distance]
     d = d[within_limit_distance]
     return candidates[np.argsort(d)[:2 * len(charging_stations) +
                                     1]].tolist()
def log_stats(ds_data: DsDataList, text_data: TextDataList):
    stats: List[Tuple[str, int, int, int, float, int]] = []
    text_lengths = [len(x.symbols) for x in text_data.items()]
    stats.append((
        "Overall",
        len(text_lengths),
        min(text_lengths),
        max(text_lengths),
        mean(text_lengths),
        sum(text_lengths),
    ))

    speakers_text_lengths: Dict[Speaker, List[float]] = {}
    for ds_entry, text_entry in zip(ds_data.items(), text_data.items()):
        if ds_entry.speaker_name not in speakers_text_lengths:
            speakers_text_lengths[ds_entry.speaker_name] = []
        speakers_text_lengths[ds_entry.speaker_name].append(
            len(text_entry.symbols))

    for speaker, speaker_text_lengths in speakers_text_lengths.items():
        stats.append((
            speaker,
            len(speaker_text_lengths),
            min(speaker_text_lengths),
            max(speaker_text_lengths),
            mean(speaker_text_lengths),
            sum(speaker_text_lengths),
        ))

    stats.sort(key=lambda x: (x[-1]), reverse=True)
    stats_csv = pd.DataFrame(stats,
                             columns=[
                                 "Speaker",
                                 "# Entries",
                                 "# Min",
                                 "# Max",
                                 "# Avg",
                                 "# Total",
                             ])

    logger = getLogger(__name__)
    with pd.option_context(
            'display.max_rows',
            None,
            'display.max_columns',
            None,
            'display.width',
            None,
            'display.precision',
            0,
    ):
        logger.info(stats_csv)
def log_stats(ds_data: DsDataList, wav_data: WavDataList):
  logger = getLogger(__name__)
  if len(wav_data) > 0:
    logger.info(f"Sampling rate: {wav_data.items()[0].wav_sampling_rate}")
  stats: List[Tuple[str, int, float, float, float, float, float]] = []

  durations = [x.wav_duration for x in wav_data.items()]
  stats.append((
    "Overall",
    len(wav_data),
    min(durations),
    max(durations),
    mean(durations),
    sum(durations) / 60,
    sum(durations) / 3600,
  ))
  speaker_durations: Dict[Speaker, List[float]] = {}
  for ds_entry, wav_entry in zip(ds_data.items(), wav_data.items()):
    if ds_entry.speaker_name not in speaker_durations:
      speaker_durations[ds_entry.speaker_name] = []
    speaker_durations[ds_entry.speaker_name].append(wav_entry.wav_duration)
  for speaker_name, speaker_durations in speaker_durations.items():
    stats.append((
      speaker_name,
      len(speaker_durations),
      min(speaker_durations),
      max(speaker_durations),
      mean(speaker_durations),
      sum(speaker_durations) / 60,
      sum(speaker_durations) / 3600,
    ))

  stats.sort(key=lambda x: (x[-2]), reverse=True)
  stats_csv = pd.DataFrame(stats, columns=[
    "Speaker",
    "# Entries",
    "Min (s)",
    "Max (s)",
    "Avg (s)",
    "Total (min)",
    "Total (h)",
  ])

  with pd.option_context(
    'display.max_rows', None,
    'display.max_columns', None,
    'display.width', None,
    'display.precision', 4,
  ):
    print(stats_csv)
Example #7
 def __get_ordering(self, obs1: List[float], obs2: List[float],
                    p_value: float) -> Literal['<', '=', '>']:
     """
     Compares the p-value from the t-test of the two samples and returns the statistically significant ordering.
     The '=' ordering operator is used to signify no statistically significant ordering for the two samples.
     """
     P_LIMIT = 0.05
     if p_value < P_LIMIT:
         diff = mean(obs1) - mean(obs2)
         if diff > 0:
             return '>'
         elif diff < 0:
             return '<'
     return '='
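The example does not show where p_value comes from; for two independent samples it could be produced by a two-sample t-test, e.g. SciPy's ttest_ind. A hedged usage sketch under that assumption:

from statistics import mean
from scipy.stats import ttest_ind

obs1 = [12.1, 11.8, 12.4, 12.0, 12.2]  # hypothetical samples
obs2 = [11.2, 11.0, 11.5, 11.1, 11.3]
_, p_value = ttest_ind(obs1, obs2)
if p_value < 0.05:
    print('>' if mean(obs1) - mean(obs2) > 0 else '<')
else:
    print('=')  # no statistically significant ordering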
Example #8
def main():
    get_args()

    def sentences():
        return chain.from_iterable(
            (read_slice(data) for data in read_corpus()))

    bigram = Phrases(sentences(), min_count=1, threshold=1, delimiter=b' ')
    bigram_phraser = Phraser(bigram)

    bigrammed = map(lambda x: bigram_phraser[x], sentences())

    trigram = Phrases(bigrammed, min_count=1, threshold=1, delimiter=b' ')
    trigram_phraser = Phraser(trigram)

    only_trigrams = {b' '.join(trigram_tuple): score for (trigram_tuple, score) in \
        trigram_phraser.phrasegrams.items() if b' '.join(trigram_tuple).count(b' ') == 2}

    for key, value in sorted(only_trigrams.items(),
                             key=lambda item: item[1],
                             reverse=True)[:10]:
        print(key, value)

    scores = list(only_trigrams.values())
    print("""
    Unique trigrams: {unique}
    Mean score: {mean}
    Max score: {max}
    Min score: {min}
    """.format(unique=len(only_trigrams),
               mean=mean(scores) if len(scores) != 0 else 0,
               max=max(scores) if len(scores) != 0 else 0,
               min=min(scores) if len(scores) != 0 else 0))
Example #9
def plotTable(v, vol, prob, lbds, MOQ, name):
    """
   DEFINITION:
   v: index in volatility array
   vol: volatility array. Contains simulated volatilities.
   name: name of the plot. 
   """
    np, _, nr, _ = shape(MOQ)
    MOQshape = zeros((np, nr))
    for p in xrange(len(prob)):
        for r in xrange(len(lbds)):
            MOQshape[p, r] = mean(MOQ[p, v, r, :])
    fig = plt.figure()
    ax = fig.gca(projection='3d')
    X = lbds
    Y = prob
    X, Y = meshgrid(X, Y)
    Z = MOQshape
    plt.xlabel('Lambdas')
    plt.ylabel('Probabilities')
    plt.title(name + ' for fixed lambdas - v=' + str(vol[v]))
    surf = ax.plot_surface(X,
                           Y,
                           Z,
                           rstride=1,
                           cstride=1,
                           cmap=cm.jet,
                           linewidth=0,
                           antialiased=False)
    ax.zaxis.set_major_formatter(FormatStrFormatter('%.02f'))
    fig.colorbar(surf, shrink=0.5, aspect=5)
Example #10
    def _collapse_column(self, column_name, current_value):
        """Here is where values are collapsed based on the input values.  Assumed t be separated by a "|"
        current_value is assumed to be a string.
        """
        if column_name not in self._columns_to_collapse:
            return current_value

        list_vals = current_value.split("|")
        no_blank_vals = [v for v in list_vals if v.strip() != "" and v.strip() != "."]

        # if no "." is found in the value, assume that this should be rendered as an int, if MIN was chosen
        #   we assume it is an int if no "." is found.
        is_assuming_int = all([v.find(".") == -1 for v in no_blank_vals])

        try:
            if is_assuming_int:
                final_vals = [int(v) for v in no_blank_vals]
            else:
                final_vals = [float(v) for v in no_blank_vals]
        except ValueError:
            logging.getLogger(__name__).warning("Could not collapse " + column_name + ":" + current_value + " into one number.  Returning the input value.")
            return current_value

        if len(final_vals) == 0:
            return ""

        if self._method_dict[column_name] == ColumnCollapser.MIN:
            return str(min(final_vals))

        elif self._method_dict[column_name] == ColumnCollapser.MEAN:
            return str(mean(final_vals))
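A standalone check of the collapse logic above, using a hypothetical "|"-separated value with blank and "." entries:

from statistics import mean

list_vals = "3|7|.|5".split("|")
no_blank_vals = [v for v in list_vals if v.strip() not in ("", ".")]
print(min(int(v) for v in no_blank_vals))     # MIN  -> 3
print(mean(float(v) for v in no_blank_vals))  # MEAN -> 5.0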
Example #11
 def PlotWss(self, meshid, imagpath):
     '''
     This method plots Wss signal and returns peak wss.
     '''
     try:
         import matplotlib
         matplotlib.use('Agg') #switch to matplotlib.use('WXAgg') if you want to show and not save velocity profile.
         from matplotlib.pyplot import plot, xlabel, ylabel, title, legend, savefig, close, ylim
     except ImportError:
         sys.exit("PlotWss method requires the matplotlib package (http://matplotlib.sourceforge.net).\n")
     
     tplot = linspace(0, self.tPeriod, len(self.Tauplot))
     plot(tplot, self.Tauplot,'g-',linewidth = 3, label = 'WSS')
     minY = 0
     for w in self.Tauplot:
         if w < minY:
             minY = w
     
     if minY != 0:
         plot(tplot, zeros(len(self.Tauplot)),':',linewidth = 1)
         
     ylim(ymin=minY)
     
     xlabel('Time ($s$)')
     ylabel('Wall shear stress ($dyne/cm^2$)')
     title ('Wss'+' peak:'+str(round(max(self.Tauplot),1))+' mean:'+str(round(mean(self.Tauplot),1))+' min:'+str(round(min(self.Tauplot),1)))    
     legend()
     savefig(imagpath+str(meshid)+'_'+str(self.Name)+'_wss.png')
     print "Wss, MeshId", meshid, self.Name, "=", str(round(max(self.Tauplot),1)), "$dyne/cm^2$"
     close()
     return (round(max(self.Tauplot),1))
Example #12
def trans_lcqmc_bert(dataset: list, vocab: Vocabulary, is_merge=0):
    """
    Maximum length
    """
    out_arr, text_len = [], []
    for each in dataset:
        t1, t2, label = each.text_a, each.text_b, int(each.label)
        if is_merge:
            out_ids1, mask_ids1, seg_ids1, seq_len1 = vocab._transform_2seq2bert_id(
                t1, t2, padding=1)
            out_arr.append([out_ids1, mask_ids1, seg_ids1, seq_len1, label])
            text_len.extend([len(t1) + len(t2)])
        else:
            out_ids1, mask_ids1, seg_ids1, seq_len1 = vocab._transform_seq2bert_id(
                t1, padding=1)
            out_ids2, mask_ids2, seg_ids2, seq_len2 = vocab._transform_seq2bert_id(
                t2, padding=1)
            out_arr.append([
                out_ids1, mask_ids1, seg_ids1, seq_len1, out_ids2, mask_ids2,
                seg_ids2, seq_len2, label
            ])
            text_len.extend([len(t1), len(t2)])
        pass
    print("max len", max(text_len), "avg len", mean(text_len), "cover rate:",
          np.mean([x <= conf.max_seq_len for x in text_len]))
    return out_arr
Example #13
def normalizeData(data, meanOnly = False):
    """
    normalize data by subtracting mean and dividing by sd per COLUMN
    @param data: an array
    @param meanOnly: if True subtract mean only; otherwise divide by sd too
    @return: (an array with the same dimension as data, mean, stds, the transformer)
    """

    # compute the new data
    m = mean(data, axis=0)
    res = data - m

    if meanOnly:
        stds = 1
    else:
        stds = sqrt(var(data, axis=0))
        stds[stds==0] = 1   # to avoid dividing by 0

        res /= stds

    # figure out the transformer
    def foo(givenData):
        assert givenData.shape[1]==data.shape[1], "Only arrays of %d columns are handled." % data.shape[1]
        return (givenData - m)/stds

    return res, m, stds, foo
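A minimal usage sketch for normalizeData, assuming the unqualified mean, sqrt and var it calls are the NumPy functions (the snippet's imports are not shown):

import numpy as np
from numpy import mean, sqrt, var

data = np.array([[1.0, 10.0], [2.0, 20.0], [3.0, 30.0]])
res, m, stds, transform = normalizeData(data)
print(m)                                   # column means: [ 2. 20.]
print(np.round(res.mean(axis=0), 12))      # ~[0. 0.] after centering and scaling
print(transform(np.array([[2.0, 20.0]])))  # new data mapped with the fitted mean/std -> [[0. 0.]]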
Example #14
    def printMe(self):
        """print the result of doVse in an accessible format.
        for instance:
        vses.printMe()

        """
        for i in range(len(self.methods)):
            print(self.methods[i][0])
            print([strat.getName() for strat in self.methods[i][1]],
                  [mean([result[i].results[j].result[0] for result in self.vses])
                      for j in range(len(self.methods[i][1]) - 1)],
                  mean(
                       [(0 if result[i].results[0].result[0]==result[i].results[2].result[0] else 1)
                            for result in self.vses]
                       )
                  )
Example #15
def normalizeData(data, meanOnly = False):
    """
    normalize data by subtracting mean and dividing by sd per COLUMN
    Parameters:
        - data: an array
        - meanOnly: if True subtract mean only; otherwise divide by sd too
    Returns: an array with the same dimension as data
    """
    if meanOnly:
        return data-mean(data,axis=0)

    else:
        stds = sqrt(var(data, axis=0))
        stds[stds==0] = 1   # to avoid dividing by 0

        return (data - mean(data, axis=0))/stds
Example #16
    def _phase_3(self, npoints):
        normalized_edges_dict = {}
        to_remove = set()
        for node in range(npoints):
            # transforms int, int, dict into int, int, float (src, dst, weight)
            tmp = set(
                Edge.tuple_representation(Edge.convert_weight_dict(t))
                for t in self.g.edges(node, data=True))
            normalized_edges_dict[node] = tmp
        for node in range(npoints):
            local_edges = normalized_edges_dict[node].copy()
            edges2 = local_edges
            for neigh in local_edges:
                other = neigh[1]
                other_edges = normalized_edges_dict[other]
                edges2 = edges2.union(other_edges)
            mean2 = mean(list(map(lambda e: e[-1],
                                  edges2))) if len(edges2) > 0 else 0
            self.local_mean2.append(mean2)

            for e in edges2:
                w = e[-1]
                if w > (mean2 + self.mean_std_dev):
                    to_remove.add((e[0], e[1]))

        self.local_mean2 = np.array(self.local_mean2)
        for e in to_remove:
            self.g.remove_edge(e[0], e[1])

        self.labels_, self.cluster_sizes = self._label_conn_comp(
            self.shuffle_labels)
        return
Example #17
    def GetTaoFromQ(self, el):
        '''
        Computing wall shear stress in terms of the flow rate,
        using inverse womersley method of Cezeaux et al.1997
        '''
        self.radius = mean(el.Radius)
        self.Res = el.R
        self.length = el.Length
        self.Name = el.Name

        #WOMERSLEY NUMBER
        self.alpha = self.radius * sqrt(
            (2.0 * pi * self.density) / (self.tPeriod * self.viscosity))

        #FOURIER SIGNAL
        k = len(self.signal)
        n = 0
        while n < (self.nHarmonics):
            An = 0
            Bn = 0
            for i in arange(k):
                An += self.signal[i] * cos(
                    n * (2.0 * pi / self.tPeriod) * self.dt * self.nSteps[i])
                Bn += self.signal[i] * sin(
                    n * (2.0 * pi / self.tPeriod) * self.dt * self.nSteps[i])
            An = An * (2.0 / k)
            Bn = Bn * (2.0 / k)
            self.fourierModes.append(complex(An, Bn))
            n += 1

        self.Steps = linspace(0, self.tPeriod, self.samples)
        self.WssSignal = []
        self.Tauplot = []

        for step in self.Steps:
            self.tao = -self.fourierModes[0].real * 2.0

            k = 1
            while k < self.nHarmonics:
                cI = complex(0., 1.)
                cA = (self.alpha * pow((1.0 * k), 0.5)) * pow(cI, 1.5)
                c1 = 2.0 * jn(1, cA)
                c0 = cA * jn(0, cA)
                cT = complex(0, -2.0 * pi * k * self.t / self.tPeriod)
                '''tao computation'''
                taoNum = self.alpha**2 * cI**3 * jn(1, cA)
                taoDen = c0 - c1
                taoFract = taoNum / taoDen
                cTao = self.fourierModes[k] * exp(cT) * taoFract
                self.tao += cTao.real
                k += 1

            self.tao *= -(self.viscosity / (self.radius**3 * pi))
            self.Tauplot.append(self.tao * 10)  #dynes/cm2
            self.WssSignal.append(self.tao)
            self.t += self.dtPlot

        return self.WssSignal  #Pascal
Example #18
    def _mean_vote(self, all_predictions):
        """Returns mean of the predictions

        Args:
            all_predictions (List[List[float]]): The predictions from all models, per event

        Returns:
            List[Float]: The mean of predictions from all models, per event
        """
        return [mean(predictions) for predictions in all_predictions]
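A quick usage sketch with hypothetical predictions from three models over two events:

from statistics import mean

all_predictions = [[1, 2, 3], [4, 5, 6]]  # three models, two events
print([mean(predictions) for predictions in all_predictions])  # [2, 5]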
Example #19
 def getSensitivity(self):
     allSens = []
     for example in self.examples:
         sens = []
         modifiedExample = example
         isMyClass = -1.0
         if example["digit"] == self.myDigit:
             if example["isProper"]:
                 isMyClass = 1.0
         result = self.classify(example["pixelMap"])
         for i in range(len(example["pixelMap"])):
             modifiedExample["pixelMap"][i] += self.H
             modifiedResult = self.classify(modifiedExample["pixelMap"])
             sens.append((modifiedResult - result) / self.H)
             modifiedExample["pixelMap"][i] -= self.H  # restore the pixel so each perturbation is independent
         allSens.append(mean(sens))
     self.sensitivity = mean(allSens)
     print("Sensitivity for digit %s: %s" %
           (self.myDigit, self.sensitivity))
Example #20
    def GetTaoFromQ(self,el):
        '''
        Computing wall shear stress in terms of the flow rate,
        using inverse womersley method of Cezeaux et al.1997
        '''
        self.radius = mean(el.Radius)        
        self.Res = el.R
        self.length = el.Length
        self.Name = el.Name
        
        #WOMERSLEY NUMBER
        self.alpha = self.radius * sqrt((2.0 *pi*self.density)/(self.tPeriod*self.viscosity))
        
        #FOURIER SIGNAL
        k = len(self.signal)
        n = 0
        while n < (self.nHarmonics):
            An = 0
            Bn = 0
            for i in arange(k):
                An += self.signal[i] * cos(n*(2.0*pi/self.tPeriod)*self.dt*self.nSteps[i])
                Bn += self.signal[i] * sin(n*(2.0*pi/self.tPeriod)*self.dt*self.nSteps[i])
            An = An * (2.0/k)
            Bn = Bn * (2.0/k)
            self.fourierModes.append(complex(An, Bn))
            n+=1
        
        self.Steps = linspace(0,self.tPeriod,self.samples)
        self.WssSignal = []  
        self.Tauplot = []
       
        for step in self.Steps:
            self.tao = -self.fourierModes[0].real * 2.0 
            
            k=1
            while k < self.nHarmonics:  
                cI = complex(0.,1.)
                cA = (self.alpha * pow((1.0*k),0.5)) * pow(cI,1.5)  
                c1 = 2.0 * jn(1, cA)
                c0 = cA * jn(0, cA)
                cT = complex(0, -2.0*pi*k*self.t/self.tPeriod)  
                '''tao computation'''
                taoNum = self.alpha**2*cI**3*jn(1,cA)
                taoDen = c0-c1
                taoFract = taoNum/taoDen
                cTao = self.fourierModes[k] * exp(cT) * taoFract
                self.tao += cTao.real
                k+=1

            self.tao *= -(self.viscosity/(self.radius**3*pi))
            self.Tauplot.append(self.tao*10) #dynes/cm2
            self.WssSignal.append(self.tao)
            self.t += self.dtPlot
            
        return self.WssSignal #Pascal
Example #21
 def getMostSevereValue(self, minNInstants=1): # TODO use np.percentile
     from numpy.core.multiarray import array
     from numpy.core.fromnumeric import mean
     values = array(self.values.values())
     indices = range(len(values))
     if len(indices) >= minNInstants:
         values = sorted(values[indices], reverse = self.mostSevereIsMax) # inverted if most severe is max -> take the first values
         return mean(values[:minNInstants])
     else:
         return None
Example #22
def main():
    os.chdir(r'G:\Shared drives\Apex\Acoustic Data\IOA, conference proceedings\2021 papers\Max noise levels from hockey pitches')
    # adjust_value_by_LAeqT()
    filenames = [str(1 + n) + '.wav' for n in range(104)]
    LAFmax = [calc_LAeq_dt(os.path.join('bat hitting', fn)) for fn in filenames]
    hist_plot(LAFmax)
    print(mean(LAFmax))
    print(std(LAFmax))
    plt.xlabel('LAFmax at 11 m')
    plt.show()
Example #23
def squat_down(angle, angles_arr, squat_knee_angle, left_knee_angle, right_knee_angle, left_hip_gap, right_hip_gap):
    global color
    global test
    global count_flag
    global squat_count
    global video_player  # video playback trigger
    global squat_set
    global running
    global rest_flag

    # while going down
    if mean(angles_arr[-10:-5]) < mean(angles_arr[-5:]) and angle - angles_arr[0] >= 10:
        if left_knee_angle > squat_knee_angle * 1.2 and right_knee_angle > squat_knee_angle * 1.2:
            test = "Lower"
            color = (0, 0, 250)
        elif (squat_knee_angle <= left_knee_angle < squat_knee_angle * 1.2 or
                squat_knee_angle <= right_knee_angle < squat_knee_angle * 1.2) or \
                (left_hip_gap < 20 or right_hip_gap < 20):
            test = "Good"
            video_player = 0  # ready to play the video once fully seated
            count_flag = True
            color = (255, 0, 0)
        
    # while coming up
    if mean(angles_arr[-10:-5]) > mean(angles_arr[-5:]):

        # when stopping after going down and coming back up
        if min(angles_arr) * 0.9 < angle < min(angles_arr) * 1.1:  # recognition of fully "standing" seems to allow some slack
            if count_flag:
                video_player = 1  # ready to play the video once fully standing
                count_flag = False
                squat_count += 1
                if squat_count % CUSTOM_SQUAT_COUNT == 0:
                    squat_set += 1
                    squat_count = 0

                    running = False
                    rest_flag = True

                print(f"rep: {squat_count}")
Example #24
 def GetRadius(self, abscissa):
     '''
     This method returns edge's radius
     '''
     if 'value' in self.Radius:
         return self.Radius['value']
     if 'array' in self.Radius:
         if abscissa is not None:
             return self.Radius['array'][abscissa]
         else:
             if self.edgeAbscissa is None: 
                 return mean(array((self.Radius['array'].values())))     
             else:
                 return self.Radius['array'][self.edgeAbscissa]    
def Classic(inputImage, decodedObjects, X0):
    Data = []
    Distance = []
    dY = []
    for i in range(0, len(decodedObjects)):
        zbarData = decodedObjects[i].data
        arr = list(map(float, zbarData.split()))
        Data.append([arr[2], arr[3]])
        polygon = decodedObjects[i].polygon
        SIDE_OF_QR = arr[0]
        H_QR = arr[1] - H_CAMERA

        data = polygon[:]
        if ((polygon[0].y + 30) < (polygon[1].y)):
            data = polygon[:]
            key = False
        else:
            key = True
            data[0] = polygon[3]
            data[1] = polygon[0]
            data[2] = polygon[1]
            data[3] = polygon[2]

        centerTop = getCenter(data[0], data[3])
        centerBottom = getCenter(data[1], data[2])

        a = distanceCalculate2(data[0], data[1], H_QR, SIDE_OF_QR)
        b = distanceCalculate2(centerTop, centerBottom, H_QR, SIDE_OF_QR)
        d = distanceCalculate2(data[2], data[3], H_QR, SIDE_OF_QR)

        b = mean([a, b, d])

        # dY.append(coordY(centerTop, centerBottom, (centerTop.x + centerBottom.x) / 2., SIDE_OF_QR))  # alternative method

        dy = coordY(centerTop, centerBottom,
                    (centerTop.x + centerBottom.x) / 2., SIDE_OF_QR)

        b = (b * b + dy * dy)

        Distance.append(b)
    res = scipy.optimize.leastsq(RP, X0, args=(Distance, Data))
    x = res[0]
    # dy = (dY[0] + dY[1]) / 2  # alternative method
    # x[1] = x[1] - dy
    cv2.putText(inputImage,
                f"X(gl) = {round(x[0], 3)}, Y(gl) = {round(x[1],3)} ",
                (320, 110), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 0, 255), 2,
                cv2.LINE_AA)

    return x[0], x[1]
def get_rewards(file):
    rewards = []
    steps = []
    with open(file) as f:
        for l in f.readlines()[2:]:
            rewards.append(float(l.split(",")[0]))
            steps.append(int(l.split(",")[1]))
    steps = list(np.cumsum(steps))
    final_r = []
    final_s = []
    for i in range(1000, len(rewards), 1):
        final_r.append(mean(rewards[i - 1000:i]))
        final_s.append(steps[i])
    return final_r, final_s
Example #27
 def GetRadius(self, abscissa):
     '''
     This method returns edge's radius
     '''
     if 'value' in self.Radius:
         return self.Radius['value']
     if 'array' in self.Radius:
         if abscissa is not None:
             return self.Radius['array'][abscissa]
         else:
             if self.edgeAbscissa is None:
                 return mean(array((self.Radius['array'].values())))
             else:
                 return self.Radius['array'][self.edgeAbscissa]
Example #28
 def calc_LAeq0800_1800(self):
     all_data = []
     for n in range(self.days):
         day_n = self.start_date + pd.Timedelta(n, "days")
         start_time = day_n + pd.Timedelta(8, "hours")
         end_time = day_n + pd.Timedelta(18, "hours")
         df = self.noise[(self.noise['Time_obj'] >= start_time)
                         & (self.noise['Time_obj'] < end_time)]
         LAeq0800_1800 = 10. * np.log10(mean(10**(df['Leq'].values / 10)))
         all_data.append(LAeq0800_1800)
     output = pd.DataFrame(all_data,
                           index=self.day_all,
                           columns=['LAeq0800_1800'])
     output = output.transpose()
     output.to_csv('LAeq0800_1800.csv')
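The LAeq line above is an energy (logarithmic) average: levels are converted from dB to linear power, averaged, then converted back. A standalone check with hypothetical levels:

import numpy as np
from numpy import mean

leq_values = np.array([55.0, 60.0, 58.0])  # hypothetical hourly Leq values in dB
laeq = 10. * np.log10(mean(10 ** (leq_values / 10)))
print(round(laeq, 1))  # ~58.1, higher than the arithmetic mean of 57.7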
Example #29
    def vseOn(self, voters, chooserFuns=(), **args):
        """Finds honest and strategic voter satisfaction efficiency (VSE)
        for this method on the given electorate.
        """
        multiResults = self.multiResults(voters, chooserFuns, **args)
        utils = voters.socUtils
        best = max(utils)
        rand = mean(utils)

        #import pprint
        #pprint.pprint(multiResults)
        vses = VseMethodRun(self.__class__, chooserFuns,
                    [VseOneRun([(utils[self.winner(result)] - rand) / (best - rand)],tally,chooser)
                        for ((result, chooser), tally) in multiResults[0]])
        vses.extraEvents=multiResults[1]
        return vses
Example #30
    def vseOn(self, voters, chooserFuns=(), **args):
        """Finds honest and strategic voter satisfaction efficiency (VSE)
        for this method on the given electorate.
        """
        multiResults = self.multiResults(voters, chooserFuns, **args)
        utils = voters.socUtils
        best = max(utils)
        rand = mean(utils)

        #import pprint
        #pprint.pprint(multiResults)
        vses = VseMethodRun(self.__class__, chooserFuns,
                    [VseOneRun([(utils[self.winner(result)] - rand) / (best - rand)],tally,chooser)
                        for (result, chooser, tally) in multiResults[0]])
        vses.extraEvents=multiResults[1]
        return vses
Example #31
 def calculateFitness(self, population, _):
     if not self.inited:
         self.inited = True
         return
     sys.stdout.flush()
     individuals = []  # avoid shadowing the builtins `all` and `list`
     for group in population:
         for ind in group["individuals"]:
             individuals.append(ind)
     num_threads = int(mp.cpu_count() - 1)
     pool = mp.Pool(num_threads)
     res = pool.map(self.battle, individuals)
     for ind, fitness in zip(individuals, res):
         ind.setFitness(fitness)
     average = mean(res)
     print(f"average fitness {average}")
     pool.close()
Example #32
def trans_lcqmc(dataset):
    """
    Maximum length
    """
    out_arr, text_len =  [], []
    for each in dataset:
        t1, t2, label = each.text_a, each.text_b, int(each.label)
        t1_ids = convert_word2id(t1, conf.vocab_map)
        t1_len = conf.max_seq_len if len(t1) > conf.max_seq_len else len(t1)
        t2_ids = convert_word2id(t2, conf.vocab_map)
        t2_len = conf.max_seq_len if len(t2) > conf.max_seq_len else len(t2)
        # t2_len = len(t2) 
        out_arr.append([t1_ids, t1_len, t2_ids, t2_len, label])
        # out_arr.append([t1_ids, t1_len, t2_ids, t2_len, label, t1, t2])
        text_len.extend([len(t1), len(t2)])
        pass
    print("max len", max(text_len), "avg len", mean(text_len), "cover rate:", np.mean([x <= conf.max_seq_len for x in text_len]))
    return out_arr
Example #33
def evaluate_populations(swarm, envs: List, eval_episode: int) -> Tuple[float, float]:
    scores = np.zeros(shape=len(envs))
    for i, agent in enumerate(swarm.population):
        env = envs[i]
        score = 0
        for i_episode in range(eval_episode):
            obs = env.reset()
            while True:
                action = agent.choose_action(obs, use_noise=False)
                next_obs, reward, done, _ = env.step(action)
                score += reward
                if done:
                    break
                else:
                    obs = next_obs
        scores[i] = score / eval_episode

    return mean(scores), max(scores)
Example #34
def clean_raw_data(df):
    """ Takes a dataframe and performs four steps:
            - Selects columns for modeling
            - For numeric variables, replaces 0 values with mean for that region
            - Fills invalid construction_year values with the mean construction_year
            - Converts strings to categorical variables
            
        :param df: A raw dataframe that has been read into pandas
        :returns: A dataframe with the preprocessing performed.
    """
    useful_columns = [
        'amount_tsh', 'gps_height', 'longitude', 'latitude', 'region',
        'population', 'construction_year', 'extraction_type_class',
        'management_group', 'quality_group', 'source_type', 'waterpoint_type',
        'status_group'
    ]
    df_input = df[useful_columns].copy()
    zero_is_bad_value = ['longitude', 'population']
    other_bad_value = ['latitude']
    # df_input = replace_value_with_grouped_mean(df_input)
    # df_input = replace_value_with_grouped_mean(df_input, np.nan, 'construction_mean')
    for col in useful_columns:
        if df_input[col].dtype == 'object':
            df_input[col] = df_input[col].astype("category")
            # print('change col {} format'.format(col))
        if col == 'construction_year':
            invalid_rows = df_input[col] <= 1000
            valid_mean = mean(df_input.loc[(~invalid_rows), col])
            df_input.loc[invalid_rows, col] = valid_mean
            # print('change all construction year less than 1000 to mean')
        if col in zero_is_bad_value:
            df_input = replace_value_with_grouped_mean(df_input, 0, col,
                                                       'region')
            print("Change col {} from 0 to mean".format(col))
        if col in other_bad_value:
            df_input = replace_value_with_grouped_mean(df_input, -2e-8, col,
                                                       'region')
            print("Change col {} from -2e-8 to mean".format(col))

    return df_input
Example #35
def indicatorMap(indicatorValues, trajectory, squareSize):
    '''Returns a dictionary 
    with keys for the indices of the cells (squares)
    in which the trajectory positions are located
    at which the indicator values are attached

    ex: speeds and trajectory'''

    from numpy import floor, mean
    assert len(indicatorValues) == trajectory.length()
    indicatorMap = {}
    for k in xrange(trajectory.length()):
        p = trajectory[k]
        i = floor(p.x/squareSize)
        j = floor(p.y/squareSize)
        if indicatorMap.has_key((i,j)):
            indicatorMap[(i,j)].append(indicatorValues[k])
        else:
            indicatorMap[(i,j)] = [indicatorValues[k]]
    for k in indicatorMap.keys():
        indicatorMap[k] = mean(indicatorMap[k])
    return indicatorMap
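The same bin-and-average idea, sketched without the trajectory class and with hypothetical positions and speeds:

from numpy import floor, mean

positions = [(1.0, 1.0), (1.5, 1.2), (5.0, 5.0)]  # hypothetical (x, y) points
speeds = [10.0, 14.0, 3.0]
squareSize = 2.0
cells = {}
for (x, y), v in zip(positions, speeds):
    key = (int(floor(x / squareSize)), int(floor(y / squareSize)))
    cells.setdefault(key, []).append(v)
cells = {k: float(mean(v)) for k, v in cells.items()}
print(cells)  # {(0, 0): 12.0, (2, 2): 3.0}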
Example #36
 def resultsTable(self, eid, emodel, cands, voters, chooserFuns=(), **args):
     multiResults = self.multiResults(voters, chooserFuns, **args)
     utils = voters.socUtils
     best = max(utils)
     rand = mean(utils)
     rows = list()
     nvot=len(voters)
     for ((result, chooser), tally) in multiResults[0]:
         row = {
             "eid":eid,
             "emodel":emodel,
             "ncand":cands,
             "nvot":nvot,
             "best":best,
             "rand":rand,
             "method":str(self),
             "chooser":chooser,#.getName(),
             "util":utils[self.winner(result)],
             "vse":(utils[self.winner(result)] - rand) / (best - rand)
         }
         #print(tally)
         for (i, (k, v)) in enumerate(tally.items()):
             #print("Result: tally ",i,k,v)
             row["tallyName"+str(i)] = str(k)
             row["tallyVal"+str(i)] = str(v)
         rows.append(row)
     if len(multiResults[1]):
         row = {
             "eid":eid,
             "emodel":emodel,
             "method":self.__class__.__name__,
             "chooser":"extraEvents",
             "util":None
         }
         for (i, (k, v)) in enumerate(multiResults[1]):
             row["tallyName"+str(i)] = str(k)
             row["tallyVal"+str(i)] = str(v)
         rows.append(row)
     return(rows)
Example #37
 def resultsTable(self, eid, emodel, cands, voters, chooserFuns=(), **args):
     multiResults = self.multiResults(voters, chooserFuns, **args)
     utils = voters.socUtils
     best = max(utils)
     rand = mean(utils)
     rows = list()
     nvot = len(voters)
     for (result, chooser, tallyItems) in multiResults:
         row = {
             "eid": eid,
             "emodel": emodel,
             "ncand": cands,
             "nvot": nvot,
             "best": best,
             "rand": rand,
             "method": str(self),
             "chooser": chooser,  #.getName(),
             "util": utils[self.winner(result)],
             "vse": (utils[self.winner(result)] - rand) / (best - rand)
         }
         #print(tallyItems)
         for (i, (k, v)) in enumerate(tallyItems):
             #print("Result: tally ",i,k,v)
             row["tallyName" + str(i)] = str(k)
             row["tallyVal" + str(i)] = str(v)
         rows.append(row)
     # if len(multiResults[1]):
     #     row = {
     #         "eid":eid,
     #         "emodel":emodel,
     #         "method":self.__class__.__name__,
     #         "chooser":"extraEvents",
     #         "util":None
     #     }
     #     for (i, (k, v)) in enumerate(multiResults[1]):
     #         row["tallyName"+str(i)] = str(k)
     #         row["tallyVal"+str(i)] = str(v)
     #     rows.append(row)
     return (rows)
Example #38
    def PlotWss(self, meshid, imagpath):
        '''
        This method plots Wss signal and returns peak wss.
        '''
        try:
            import matplotlib
            matplotlib.use(
                'Agg'
            )  #switch to matplotlib.use('WXAgg') if you want to show and not save velocity profile.
            from matplotlib.pyplot import plot, xlabel, ylabel, title, legend, savefig, close, ylim
        except ImportError:
            sys.exit(
                "PlotWss method requires the matplotlib package (http://matplotlib.sourceforge.net).\n"
            )

        tplot = linspace(0, self.tPeriod, len(self.Tauplot))
        plot(tplot, self.Tauplot, 'g-', linewidth=3, label='WSS')
        minY = 0
        for w in self.Tauplot:
            if w < minY:
                minY = w

        if minY != 0:
            plot(tplot, zeros(len(self.Tauplot)), ':', linewidth=1)

        ylim(ymin=minY)

        xlabel('Time ($s$)')
        ylabel('Wall shear stress ($dyne/cm^2$)')
        title('Wss' + ' peak:' + str(round(max(self.Tauplot), 1)) + ' mean:' +
              str(round(mean(self.Tauplot), 1)) + ' min:' +
              str(round(min(self.Tauplot), 1)))
        legend()
        savefig(imagpath + str(meshid) + '_' + str(self.Name) + '_wss.png')
        print "Wss, MeshId", meshid, self.Name, "=", str(
            round(max(self.Tauplot), 1)), "$dyne/cm^2$"
        close()
        return (round(max(self.Tauplot), 1))
Example #39
def problem2(data, figureDir, dataName, l, maxNumRepetitions, minSampleSize, targetValue):
    if os.path.exists(getProblem2FigureLoc(figureDir, dataName, l, maxNumRepetitions)):
        #this implies that all figures before it have been created already, so we don't need to repeat them
        return
    MSEValues = [[] for x in xrange(minSampleSize, len(data[TRAIN]) + 1)]
    sampleSizeValueList = range(minSampleSize, len(data[TRAIN]) + 1, 1)
    sampleSizeValueArray = array(sampleSizeValueList)
    
    targetArray = targetValue * ones(len(sampleSizeValueList), dtype=numpy.float64)
    
    for repeatNum in range(1, maxNumRepetitions+1):
        #randomly choose ordering of the samples for this run
        #make the range of indexes, then shuffle them into a random order
        randomlySortedIndexes = range(len(data[TRAIN]))
        shuffle(randomlySortedIndexes)
        
        #start with a sample size of one, go to the total training set
        for sampleSizeIndex, sampleSize in enumerate(sampleSizeValueList):
            curSampleIndexesList = randomlySortedIndexes[:sampleSize]
            curTrainSample = selectSample(data[TRAIN], curSampleIndexesList)
            curTrainLabelSample = selectSample(data[TRAIN_LABELS], curSampleIndexesList)
            w = doubleU(phi(curTrainSample), l, tListToTVector(curTrainLabelSample))
            curSampleMSE = MSE(data[TEST], w, data[TEST_LABELS])
            MSEValues[sampleSizeIndex].append(squeeze(curSampleMSE))
    # a sample size of 0 would be meaningless
    curRepeatMeanMSEValues = array([mean(array(x, dtype=numpy.float64)) for x in MSEValues])
        
    plt.plot(sampleSizeValueArray, curRepeatMeanMSEValues, '-', label="Learning curve")
    plt.plot(sampleSizeValueArray, targetArray, '--', label="Target MSE")
    plt.title("lamba = " + str(l) + " - " + str(repeatNum) + " repetitions")
    plt.xlabel("Sample Size - minimum " + str(minSampleSize))
    plt.ylabel("MSE on Full Test Set")
    plt.xlim(xmin=targetValue - .5)
    plt.legend(loc=0)
    plt.savefig(getProblem2FigureLoc(figureDir, dataName, l, repeatNum))
    plt.clf()
Example #40
outputPath = path + 'statistics/'
begin = time.time()

prob = array([.55, .65, .75, .85, .95])
vol = array([0.0,0.001,0.005,0.01,0.05,0.1])
lRates = [0.01,0.11,0.21,0.31,0.41,0.51, 0.61, 0.71, 0.81, 0.91]
nEpisodes = 50

meanSquareError = np.zeros((len(prob), len(vol),len(lRates), nEpisodes))
rightEstimate = np.zeros((len(prob), len(vol), len(lRates),nEpisodes))
rightPrediction = np.zeros((len(prob), len(vol), len(lRates),nEpisodes))
rewardedTrials = np.zeros((len(prob), len(vol), len(lRates),nEpisodes))
totalIter = len(prob)*len(vol)*nEpisodes*len(lRates)
n=1 #iterations counter
for v in xrange(len(vol)):
   for p in xrange(len(prob)):
      environment = loadArrangeVar(prob[p], vol[v], path,'environment')
      for r in xrange(len(lRates)):
         for e in xrange(nEpisodes):
            agent = loadCtLbdEpisodeVar(prob[p], vol[v], lRates[r],e, path,'agent')
            meanSquareError[p][v][r][e] = mean(agent.err**2)
            rightEstimate[p][v][r][e] = np.sum(around(agent.x[1:]) == around(environment.history)) / float(environment.history.size)*100  # x has shape nTrials+1 and history nTrials: after the last trial the agent learns the value of x for the next trial
            rightPrediction[p][v][r][e] = np.sum(around(agent.x[0:-1]) == around(environment.history)) / float(environment.history.size)*100
            rewardedTrials[p][v][r][e] = float(np.sum(agent.r))/agent.x.size*100 #Calculates how often the agent was rewarded within the episode

            showProgress(totalIter, n, time.time(), begin)
            n+=1

variables = {'meanSquareError':meanSquareError, 'rightEstimate':rightEstimate, 'rewardedTrials': rewardedTrials, 'rightPrediction':rightPrediction}
saveAllVars(outputPath,variables)
print 'Calculation finished in ', (time.time()-begin), 'seconds.'
Example #41
def get_LA90():
    # A-weighting from 31.5 Hz to 8 kHz in 1/3-octave bands
    A_weighting = np.array([
        -39.4, -34.6, -30.2, -26.2, -22.5, -19.1, -16.1, -13.4, -10.9, -8.6,
        -6.6, -4.8, -3.2, -1.9, -0.8, 0, 0.6, 1, 1.2, 1.3, 1.2, 1, 0.5, -0.1,
        -1.1
    ])
    flower, fcentre, fupper = third_octave_bands(
    )  # fcentre from 31.5 Hz to 16k Hz

    samplerate, data = wavfile.read('MS_PCM_signed_16bit.wav')

    how_many_seconds = int(data.shape[0] / samplerate)
    level_a_each_sec = []
    adjust_list = []
    LAeq1_4s = [47.3, 48.1, 48.9, 49.4]

    # calculate adjustment values
    for n in range(4):
        data_slice = data[n * samplerate:(n + 1) * samplerate]
        xf, levels = fft_wave_data(samplerate, data_slice, NFFT=16384 * 2)
        fft_df = pd.DataFrame({'Frequency': xf, 'Level_dB': levels})

        level_lin = []
        for m in range(len(fcentre)):
            low, fc, up = flower[m], fcentre[m], fupper[m]
            temp_df = fft_df[(fft_df['Frequency'] >= low)
                             & (fft_df['Frequency'] < up)]
            levels = temp_df['Level_dB'].values
            Lp = 10. * np.log10(sum(10**(levels / 10)))
            level_lin.append(Lp)

        level_a_not_adjusted = np.array(level_lin) + A_weighting
        total_a = 10. * np.log10(sum(10**(level_a_not_adjusted / 10)))
        adjust_list.append(total_a - LAeq1_4s[n])
    adjust = mean(adjust_list)

    # calculate the spectrum for each second
    for n in range(how_many_seconds):
        # if n >100:
        #     break
        data_slice = data[n * samplerate:(n + 1) * samplerate]
        xf, levels = fft_wave_data(samplerate, data_slice, NFFT=16384 * 2)
        fft_df = pd.DataFrame({'Frequency': xf, 'Level_dB': levels})

        level_lin = []
        for m in range(len(fcentre)):
            low, fc, up = flower[m], fcentre[m], fupper[m]
            temp_df = fft_df[(fft_df['Frequency'] >= low)
                             & (fft_df['Frequency'] < up)]
            levels = temp_df['Level_dB'].values
            Lp = 10. * np.log10(sum(10**(levels / 10)))
            level_lin.append(Lp)

        level_a = np.array(level_lin) + A_weighting - adjust
        level_a_each_sec.append(level_a)

    # write to pandas DataFrame and export to csv
    freqs = ['%d' % int(c) for c in fcentre]
    df = pd.DataFrame(np.array(level_a_each_sec), columns=freqs)
    df.to_csv('LAeqT_per_sec.csv')  # takes a long time to write the file!
    print(df.tail())

    La90 = []
    for f in freqs:
        ss = df[f].sort_values()  # ascending
        La90.append(ss[int(0.1 * ss.shape[0])])
    La90_df = pd.DataFrame({'Frequency': freqs, 'LA90_dB': La90})
    La90_df.to_csv('La90_at_third_octave.csv')
    print(La90_df)
Example #42
 def _moving_mean(self):
     """
         Returns mean of the last n prices
     """
     return mean(self.last_n_prices)
def svr(C, gamma, eps):
    
    #initialization of data wmproxy
    traininput, traintarget, testinput, testtarget = initialize_wmproxy()
    #training of the SVR
    
    #scaling values in training and test targets
    
    for i in range(len(traintarget)):
        if(traintarget[i] != 0):
            traintarget[i] = log(traintarget[i])
        if(traininput[i] != 0):
            traininput[i] = log(traininput[i])
            
    
    for i in range(len(testtarget)):
        if(testtarget[i] != 0):
            testtarget[i] = log(testtarget[i])
        if(testinput[i] != 0):
            testinput[i] = log(testinput[i])
    
    avg = mean(traintarget)
    sigma = std(traintarget)
    maxtrain = len(traintarget)
    C = max([abs(avg + sigma), abs(avg - sigma)])
    print "C is equal to %f" % C

    svr = SVR(traininput[maxtrain-1440:maxtrain], testinput, traintarget[maxtrain-1440:maxtrain],gamma,C,eps,eps)
    
    
    out = svr.svr_req(testinput[0:30])
    
    error = 0
    for i in range(len(out)):
        error += (out[i] - testtarget[i])
    
    mean_error = error / len(out)
    variance = 0
    for i in range(len(out)):
        variance += abs(out[i] - mean_error)  # accumulate the absolute deviations
    
    variance /= len(out)
    
    print "Variance = %f" % variance
    
    epsilon = 3*variance*sqrt(log(len(out))/len(out))
    
    print "Epsilon = %f" % epsilon
    #calculation of the metrics
    sme = svr.calc_sme(testtarget[0:30], out)
    mape = svr.calc_mape(out, testtarget[0:30])
    predx = svr.calc_pred(out, testtarget[0:30], 25)
    rsq = svr.calc_rsqr(out, testtarget[0:30])
    print out
    print testtarget[0:30]
    # print model results!
    x = array(testinput[0:30], dtype=int32)
    y = array(testtarget[0:30], dtype=int32)
    xp = array(testinput[0:30], dtype=int32)
    yp = array(out, dtype=int32)
    fig = figure()
    ax1 = fig.add_subplot(1,1,1)
    ax1.title.set_text("Predizioni modello SVR con C= %f, Gamma = %f, Eps = %f" % (C, gamma, eps))
    realvalues = ax1.plot(x, y)
    predictedvalues = ax1.plot(xp,yp,"r")
    ax1.axis([8.9,max(xp)+0.5,0,max(y)+10])
    ax1.set_xlabel('minutes of the week')
    ax1.set_ylabel('number of requests')
    legend([realvalues,predictedvalues], ["Real Values","Predicted Values"])
    
    fig.savefig("svr_model_%f" % time(), format='png')
    
    print "SME = %f" % sme
    print "MAPE = %f" % mape
    print "R^2 = %f" % rsq
    print "PREDX = %f" % predx
def hmm(states_number):
    
    #initialization of data wmproxy
#    traininput, traintarget, testinput, testtarget = initialize_wmproxy()

    #initialization of EWS data
    traininput, traintarget, pred_test, testinput, testtarget = initialize_ews()

    ## In this case we will try out performance of HMM considering just Monday! We will concatenate all series of data representing Monday workload!
    ## With EWS service we have three weeks as training and one week as test 
#    trainelements = []
#    traintarget_new = zip(*traintarget[0])
#    
#    for mon in traintarget_new:
#        trainelements += mon
#    
#    trainelements = log(trainelements)
#    
#    print "Monday training = %s" % trainelements
    model = HMM(traintarget, states_number, 264)
    
    
    test = pred_test[0:4]
#    test = traintarget[1370:1409]
#    for i in range(len(testtarget)):
#            if(testtarget[i] != 0):
#                testtarget[i] = numpy.log(testtarget[i])
#            else:
#                testtarget[i] = 1.0/100000000000
#
#    
#    This function predict a timewindow X startinf from a test sequence
    states = model.hmm_req(test, 30)
   
#    This function is for the integrity check of the model (how it fit the training set)
#    seq = EmissionSequence(model.sigma, traintarget)
#    states = model.m.viterbi(seq)
#    states = states[0]



    ttarget = []
    print "States2"
    print states
    meanout = []
    for state in states:
        li = model.m.getEmission(state)
#        maxes = numpy.where(array(li) > max(li)*0.4)[0]
#        print maxes
#        meanout.append(li.index(li[max(maxes)]))
#        maxvals = [li.index(li[maxval]) for maxval in maxes]
        maxes = nlargest(5, li)
        meanout.append(li.index(maxes[0]))
        maxvals = [li.index(maxval) for maxval in maxes]
        ttarget.append(maxvals)
##    sme = sme_calc(ttarget, testtarget[counter])
#    print ttarget
#    
    minout = []
    maxout = []
    
    for element in ttarget:
        minout.append(min(element))
        maxout.append(max(element))
        
    print len(meanout)
#    print "minout %s: " % minout
#    print "meanout %s: " % meanout
#    print "maxout %s: " % maxout
     
    
    x = array(traininput[0:30], dtype=int32)
    y = array(pred_test[5:35], dtype=int32)
    xp = array(traininput[0:30], dtype=int32)
    yp = array(minout, dtype=int32)
    xp1 = array(traininput[0:30], dtype=int32)
    yp1 = array(maxout, dtype=int32)
    xp2 = array(traininput[0:30], dtype=int32)
    yp2 = array(meanout, dtype=int32)
    fig = figure()
    
    print "len x = % d" % len(x)
    print "len y = % d" % len(y)
    
    ax1 = fig.add_subplot(1,1,1)
    ax1.title.set_text("Predizioni modello HMM con %d stati" % (states_nuber))
    realvalues = ax1.plot(x, y)
    minpred = ax1.plot(xp,yp,"r")
    maxpred = ax1.plot(xp1,yp1,"g")
    avgpred = ax1.plot(xp2,yp2,"y")
#    ax1.axis([8.9,max(xp)+0.5,0,max(y)+10])
    ax1.set_xlabel('minutes of the week')
    ax1.set_ylabel('cluster')
    legend([realvalues, minpred, avgpred, maxpred], ["Real Values", "Minimum Predicted Values","Average Predicted Values","Maximum Predicted Values"])
#    fig.savefig("hmm_model_%f.png" % time(), format='png')
    
#    sme = model.sme_calc(ttarget, testtarget[10:30])
#    mape = model.mape_calc(ttarget, testtarget[10:30])
#    predx = model.pred_calc(ttarget, testtarget[10:30], 25)
#    rsq = model.rsqr_calc(ttarget, testtarget[10:30])
#    
#    print "SME = %f" % sme
#    print "MAPE = %f" % mape
#    print "R^2 = %f" % rsq
#    print "PREDX = %f" % predx
    
    
    # Compute the error of the on the worst case and the most probable value of the probabilities
    max_error = []
    mean_error = []
    
    for i in range(len(maxout)):
        max_error.append(maxout[i] - traintarget[i])
        mean_error.append(meanout[i] - traintarget[i])
    
    fig2 = figure()
    
    ax = fig2.add_subplot(1,1,1)
    ax.title.set_text("Errore rispetto alle predizioni: worst case e most probable (%d Stati)" % (states_nuber))
    maxerr = ax.plot(x,array(max_error, dtype=int32))
    meanerr = ax.plot(x,array(mean_error, dtype=int32))
    
    legend([maxerr, meanerr], ["WC error", "MP error"])
    
    print "Mean WC error"
    print mean(max_error)
    print "Mean WC error (abs)"
    print mean(absolute(max_error))
    print "Mean MP error"
    print mean(mean_error)
    print "Mean MP error (abs)"
    print mean(absolute(mean_error))
    print "Number of underestimations"
    print len(numpy.where(array(max_error) < 0)[0])
    return model, pred_test
def mcmc():
    
    
    from thesis.scripts.bayesian import requestModel_nocl
    model = MCMC(requestModel_nocl)
    
    traintarget = model.traintarget
    testtarget = model.testtarget
    traininput = model.traininput
    testinput = model.testinput
    
    
    starttime = time()
    iter = 1000
    model.sample(iter=iter, burn=200, thin=10)
    print "Training time"
    print time() - starttime
    
    for i in range(len(testinput)):
        testinput[i] -= 10080
    
    reqs = poisson_req_nocl(model, testinput[0:30], testtarget[0:30])
    
    ttarget = []
    for prob in reqs:
        
        m = max(prob)
        el = pylab.find(prob > m*2/3)
        maxes = nlargest(15, prob)
        maxvals = [prob.index(maxval) for maxval in maxes]
        ttarget.append(maxvals)
        
#    sme = sme_calc_nocl(ttarget, testtarget[0:20])
#    mape = mape_calc(ttarget, testtarget[0:20])
#    predx = pred_calc(ttarget, testtarget[0:20], 0.25)
#    rsq = rsqr_calc(ttarget, testtarget[0:20])    
#        
#    print "SME = %f" % sme
#    print "MAPE = %f" % mape
#    print "R^2 = %f" % rsq
#    print "PREDX = %f" % predx

    minout = []
    maxout = []
    meanout = []
    
    for element in ttarget:
        minout.append(min(element))
        maxout.append(max(element))
        meanout.append(mean(element))
        
    
    print "minout %s: " % minout
    print "meanout %s: " % meanout
    print "maxout %s: " % maxout
     
    
    x = array(testinput[0:30], dtype=int32)
    y = array(testtarget[0:30], dtype=int32)
    xp = array(testinput[0:30], dtype=int32)
    yp = array(minout, dtype=int32)
    xp1 = array(testinput[0:30], dtype=int32)
    yp1 = array(maxout, dtype=int32)
    xp2 = array(testinput[0:30], dtype=int32)
    yp2 = array(meanout, dtype=int32)
    fig = figure()
    ax1 = fig.add_subplot(1,1,1)
    ax1.title.set_text("Predizioni modello MCMC con %d iterazioni" % (iter))
    realvalues = ax1.plot(x, y)
    minpred = ax1.plot(xp,yp,"r")
    maxpred = ax1.plot(xp1,yp1,"g")
    avgpred = ax1.plot(xp2,yp2,"y")
#    ax1.axis([8.9,max(xp)+0.5,0,max(y)+10])
    ax1.set_xlabel('minutes of the week')
    ax1.set_ylabel('number of requests')
    legend([realvalues,minpred, avgpred, maxpred], ["Real Values","Minimum Predicted Values","Average Predicted Values","Maximum Predicted Values"])
    fig.savefig("mcmc_model_%f" % time(), format='png')
Example #46
        subplot(111, xscale="log")
    xlabel("Episodes")
    ylabel("Performance")
    legends = []
    for k in xrange(nPerfFiles):
        # print "- ",sys.argv[1+k*2]
        legends.append(sys.argv[1 + k * 2])
        lines = open(sys.argv[2 + k * 2], "r").readlines()
        # Remove commented lines
        while lines[0][0] == "#":
            del lines[0]
        # Then, the first line contains the total number of episodes
        # after each iteration
        strNEpis = lines.pop(0).split(" ")
        nIters = len(strNEpis)
        nEpis = empty(nIters)
        for i in xrange(nIters):
            nEpis[i] = float(strNEpis[i])
        # Now we can go through the perf of each trial and each iteration
        nTrials = len(lines)
        perf = empty((nTrials, nIters))
        for i in xrange(nTrials):
            sp = lines[i].split(" ")
            for j in xrange(nIters):
                perf[i, j] = float(sp[j])
        # plot(nEpis,mean(perf, axis=0))
        errorbar(nEpis, mean(perf, axis=0), yerr=std(perf, axis=0), label=sys.argv[1 + k * 2])
    # legend(legends,loc='best')
    legend(loc="best")
    show()
Example #47
 def GetVelFromQ(self,el):
     '''
     Computing velocity profile in terms of the flow rate,
     using inverse womersley method of Cezeaux et al.1997
     '''
     self.radius = mean(el.Radius)        
     self.Res = el.R
     self.length = el.Length
     self.Name = el.Name
     Flow = mean(self.signal)
     
     #WOMERSLEY NUMBER
     self.alpha = self.radius * sqrt((2.0 *pi*self.density)/(self.tPeriod*self.viscosity))
     self.Wom = self.alpha
     self.Re = (2.0*Flow*self.SimulationContext.Context['blood_density'])/(pi*self.radius*self.SimulationContext.Context['dynamic_viscosity'])
     
     #FOURIER SIGNAL
     k = len(self.signal)
     n = 0
     while n < (self.nHarmonics):
         An = 0
         Bn = 0
         for i in arange(k):
             An += self.signal[i] * cos(n*(2.0*pi/self.tPeriod)*self.dt*self.nSteps[i])
             Bn += self.signal[i] * sin(n*(2.0*pi/self.tPeriod)*self.dt*self.nSteps[i])
         An = An * (2.0/k)
         Bn = Bn * (2.0/k)
         self.fourierModes.append(complex(An, Bn))
         n+=1
         
      self.fourierModes[0] *= 0.5   # mean flow, as expected; it is defined in the XML input file.
     
     self.Steps = linspace(0,self.tPeriod,self.samples)
     
     self.VelRadius = {}
     self.VelRadiusSteps = {}
     self.VelocityPlot = {}
     for step in self.Steps:
         self.Velocity = {}
          y = -1  # radius from -1 to 1, 200 points.
         while y <=1.:
             self.VelRadius[y] = 2*(1.0**2 - y**2)*self.fourierModes[0]
             y+=0.01
             
         k=1
         while k < self.nHarmonics: 
             cI = complex(0.,1.)
             cA = (self.alpha * pow((1.0*k),0.5)) * pow(cI,1.5)      
             c1 = 2.0 * jn(1, cA)
             c0 = cA * jn(0, cA)
             cT = complex(0, -2.0*pi*k*self.t/self.tPeriod)  
              y = -1  # from -1 to 1 (y=0 is the centerline)
             while y<=1.0:
                 '''vel computation'''
                 c0_y = cA * jn(0, (cA*y))
                 vNum = c0-c0_y
                 vDen = c0-c1
                 vFract = vNum/vDen
                 cV = self.fourierModes[k] * exp(cT) * vFract
                  self.VelRadius[y] += cV.real  # velocity value at the nondimensionalized radius
                 self.Velocity[y] = self.VelRadius[y].real
                 y+=0.01
             k+=1
         
         unsortedRadii = []
         for rad, vel in self.Velocity.iteritems():
             unsortedRadii.append(rad)
         radii = sorted(unsortedRadii)
         
         self.VelPlot = []
         for x in radii:
             for rad, vel in self.Velocity.iteritems():
                 if x == rad:
                     self.VelPlot.append(vel*(100.0/(self.radius**2*pi)))                   
         self.VelocityPlot[step] = self.VelPlot
         self.t += self.dtPlot
Example #48
def getRootMeanSquaredErrors(calculated,real):
    d = calculated-real
    d = d**2
    m = mean(d)
    sm = sqrt(m)
    return sm
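A quick check of the helper with hypothetical arrays, assuming the unqualified mean and sqrt it relies on are the NumPy functions:

from numpy import array, mean, sqrt

calculated = array([2.0, 4.0, 6.0])
real = array([1.0, 4.0, 8.0])
# mean of squared differences = (1 + 0 + 4) / 3, so RMSE = sqrt(5/3) ≈ 1.29
print(getRootMeanSquaredErrors(calculated, real))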
Example #49
 statList = []
 learningCurveStatList = []
 for dataset in datasetNames:
     print dataset
     (data, labels) = readData(dataDir, dataset)
     for algorithm in algorithmList:
         print algorithm.__name__, '\t',
         accuracyList = []
         #the learning curve list has one list of values for each training set size
         #so if we're trying a training set of size 10, learningCurveList[10] will be a
         #list of the accuracies from the test with training size 10
         learningCurveList = []
         for x in range(len(labels)):
             learningCurveList.append([])
         crossValidation(numCrossValidationFolds, data, labels, algorithm, accuracyList, learningCurveList, numLearningCurveIterations, learningCurveIndexMod)
         statList.append((dataset, algorithm.__name__, mean(accuracyList), std(accuracyList)))
         learningCurveStatList.append((dataset, algorithm.__name__, 
                                       [mean(x) for x in learningCurveList],
                                       [std(x) for x in learningCurveList]))
         
 outFile = open(path.join(figureDir, "table.txt"), 'a')
 outFile.write('algorithm')
 for ds in datasetNames:
     outFile.write('&& ' + ds)
 outFile.write('\\\\ \n')
 
 for alg in [x.__name__ for x in algorithmList]:
     outFile.write(alg)
     for ds in datasetNames:
         relevantTupList = [x for x in statList if x[0] == ds and x[1] == alg]
         if len(relevantTupList) != 1: