Example #1
def group(request, id):
    c = {}
    group_info = get_group(request.session['token'], id)
    c['group_info'] = group_info
    c['member_map'] = {member[u'user_id']: member[u'nickname'] for member in group_info[u'members']}
    request.session['member_map'] = c['member_map']

    # serve a cached copy when caching is on; otherwise show the loader page
    # on the first (non-AJAX) request so the heavy fetch happens via AJAX
    if settings.CACHE_ENABLED:
        cached_group = cache.get('group-%s' % id)
        if cached_group:
            c['group'] = cached_group
            return render(request, 'group.html', c)
        if request.GET.get('ajaxLoad', '0') == '0':
            return render(request, 'group-loader.html', c)

    def get_attachment(attachments):
        # first attachment URL, if any
        return attachments[0].get('url') if attachments else None
    try:
        group = Group.objects.get(id=id)
        msgs = list(Message.objects.filter(group=group).order_by('created'))
        if msgs:
            after_id = msgs[-1].id
            if not str(after_id).isdigit():
                after_id = 0
            elif str(after_id) != str(group_info[u'messages'][u'last_message_id']):
                msgs += [Message(
                        id=msg[u'id'],
                        created=datetime.fromtimestamp(float(msg[u'created_at'])),
                        author=msg[u'user_id'] if msg[u'user_id'] != 'system' else 0,
                        text=msg[u'text'],
                        img=get_attachment(msg[u'attachments']),
                        likes=msg[u'favorited_by'],
                        n_likes=len(msg[u'favorited_by'])
                        ) for msg in msg_concurrent(
                            request.session['token'], id, after_id=after_id,
                            # // keeps n_workers an int under Python 3
                            n_workers=int(group_info[u'messages'][u'count']) // 10 + 1)]
        group.analysis = analysis(request, msgs, group_info)

    except Group.DoesNotExist:
        msgs = [Message(
                id=msg[u'id'],
                created=datetime.fromtimestamp(float(msg[u'created_at'])),
                author=msg[u'user_id'] if msg[u'user_id'] != 'system' else 0,
                text=msg[u'text'],
                img=get_attachment(msg[u'attachments']),
                likes=msg[u'favorited_by'],
                n_likes=len(msg[u'favorited_by'])
                ) for msg in msg_concurrent(
                    request.session['token'], id,
                    n_workers=int(group_info[u'messages'][u'count']) // 10 + 1)]

        group = Group(id=id, analysis=analysis(request, msgs, group_info))
        # attach each fetched message to the group and persist it
        # (explicit loop; map() is lazy under Python 3 and would never run)
        for m in msgs:
            m.group = group
            m.save()
    if settings.CACHE_ENABLED:
        cache.set('group-%s' % id, group, 180)
    group.save()
    c['group'] = group
    return render(request, 'group.html', c)
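This view depends on a `msg_concurrent` helper that is not shown on this page; judging from the call sites, it fetches a group's messages in parallel pages and returns raw message dicts. A minimal sketch under those assumptions (the `fetch_page` helper and its pagination scheme are hypothetical, not from the source):

from concurrent.futures import ThreadPoolExecutor

def msg_concurrent(token, group_id, after_id=0, n_workers=1):
    # Hypothetical reconstruction: fan page fetches out over a thread pool.
    # fetch_page(token, group_id, page_no) is an assumed single-page fetcher;
    # the real implementation is not listed on this page.
    with ThreadPoolExecutor(max_workers=n_workers) as pool:
        pages = pool.map(lambda p: fetch_page(token, group_id, p),
                         range(n_workers))
    # keep only messages newer than after_id, as the caller expects
    return [msg for page in pages
            for msg in page if int(msg['id']) > int(after_id)]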
Example #2
 def analysis(self):
     # count, for each history row, how many drawn numbers match the picks
     counts = [
         analysis(
             self.history.iloc[i, 1:13].values.tolist(),
             self.deck
         ) for i in range(self.tableRows)
     ]
     self.history["對中數量"] = counts  # column name means "match count"
Example #3
 def data_stats(self, sample_size):
     """Compute the mean (mu) and std (sigma) of each frequency bin over the
     noise directory, following DeepXi."""
     stats_path = os.path.join(self.noise_dir, 'stats.npz')
     if os.path.exists(stats_path):
         with np.load(stats_path) as stats:
             self.mu = stats['mu_hat']
             self.sigma = stats['sigma_hat']
     else:
         print('Computing noise statistics...')
         samples = []
         for idx in range(sample_size):
             snr = random.choice(self.snr_level)
             speech_file = random.choice(self.speech_wav_files)
             speech_src, _ = librosa.load(speech_file, sr=self.sr)
             noise_file = random.choice(self.noise_wav_files)
             noise_src, _ = librosa.load(noise_file, sr=self.sr)
             # assumes each noise clip is at least as long as the speech clip
             start_idx = random.randint(0, len(noise_src) - len(speech_src))
             noise_src = noise_src[start_idx:start_idx + len(speech_src)]
             _, alpha = utils.add_noise(
                 speech_src, noise_src,
                 snr)  # get scale factor based on snr
             noise_src = noise_src * alpha
             # do stft for both speech and noise
             _, sample_speech_mag, _ = utils.analysis(
                 speech_src, self.frame_len, self.frame_shift, self.n_fft)
             _, sample_noise_mag, _ = utils.analysis(
                 noise_src, self.frame_len, self.frame_shift, self.n_fft)
             # compute prior snr between speech and noise spectrums
             snr_db = utils.prior_snr(
                 sample_speech_mag,
                 sample_noise_mag)  # instantaneous a prior SNR (dB).
             samples.append(np.squeeze(snr_db))
         samples = np.hstack(samples)
         if len(samples.shape) != 2:
             raise ValueError('Incorrect shape for sample.')
         stats = {
             'mu_hat': np.mean(samples, axis=1),
             'sigma_hat': np.std(samples, axis=1)
         }
         self.mu, self.sigma = stats['mu_hat'], stats['sigma_hat']
         np.savez(stats_path,
                  mu_hat=stats['mu_hat'],
                  sigma_hat=stats['sigma_hat'])
         print('Sample statistics saved.')
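`utils.analysis` and `utils.prior_snr` are used above as an STFT front end and an instantaneous a priori SNR, respectively, but neither is shown on this page. A minimal sketch of equivalents under those assumptions (the return order and epsilon handling are guesses, not the library's actual code):

import numpy as np
import librosa

def analysis(src, frame_len, frame_shift, n_fft):
    # Assumed STFT front end returning (complex STFT, magnitude, phase);
    # only the magnitude (second value) is used by data_stats above.
    spec = librosa.stft(src, n_fft=n_fft, hop_length=frame_shift,
                        win_length=frame_len)
    return spec, np.abs(spec), np.angle(spec)

def prior_snr(speech_mag, noise_mag, eps=1e-12):
    # Instantaneous a priori SNR (dB) between two magnitude spectrograms.
    return 10.0 * np.log10((speech_mag ** 2) / (noise_mag ** 2 + eps) + eps)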
Example #4
 def showResult(self):
     self.deck = self.deckInput.getValue()
     result = self.getNewest()
     numbers = result()  # call once and reuse the drawn numbers
     count = analysis(numbers, self.deck)
     self.radNoText.SetLabel(str(result.radno))
     self.dateText.SetLabel(str(result.date))
     for i, t in enumerate(numbers):
         self.winningNumbers[i].SetLabel(t)
         # highlight numbers that are in the player's picks
         if t in self.deck:
             self.winningNumbers[i].SetForegroundColour("#00c853")
         else:
             self.winningNumbers[i].SetForegroundColour("#000000")
     self.oddEvenText.SetLabel(oddAndEven(numbers))
     self.smallLargeText.SetLabel(smallAndLarge(numbers))
     self.countText.SetLabel("Matched %s numbers!!" % count)
Example #5
    Model.eval()  # switch to eval mode once, before the validation loop
    for index, (img, mask) in enumerate(val_loader):
        img = img.to(device)
        mask = mask.to(device)

        with torch.no_grad():
            output = Model(img)
            output = torch.ge(output, 0.5).type(dtype=torch.float32)  # binarize at 0.5
            output = utils.post_process(output)  # post-processing

            DSC, PPV, Sen, batch = utils.analysis(output, mask)
            DSC_sum += DSC * batch
            PPV_sum += PPV * batch
            Sen_sum += Sen * batch
            batch_sum += batch
            if index % save_step == 0:
                # save an input / prediction / ground-truth triptych
                img_list = [
                    img[0, :, :, :],
                    output[0, :, :, :],
                    mask[0, :, :, :]
                ]
                img_visualize = vutils.make_grid(img_list)
                visualize_img_path = visualize_path + str(epoch) + '_' + str(index + 1) + '.tif'
                vutils.save_image(img_visualize, visualize_img_path)
        sys.stdout.write("\r[Val] [Epoch {}/{}] [Batch {}/{}] [DSC:{:.5f}] [PPV:{:.5f}] [Sen:{:.5f}]".format(
            epoch + 1, stop_epoch, index + 1, len(val_loader), DSC, PPV, Sen))
        sys.stdout.flush()
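`utils.analysis` in this example returns per-batch Dice (DSC), positive predictive value (PPV), and sensitivity along with the batch size. The helper is not shown; a minimal sketch of how these metrics are typically computed from a binarized prediction and ground-truth mask, with the name and return convention assumed from the call site:

import torch

def analysis(output, mask, eps=1e-8):
    # Assumed metric helper: per-sample Dice, PPV, and sensitivity,
    # averaged over the batch, plus the batch size itself.
    batch = output.shape[0]
    pred = output.reshape(batch, -1)
    target = mask.reshape(batch, -1)
    tp = (pred * target).sum(dim=1)            # true positives
    fp = (pred * (1 - target)).sum(dim=1)      # false positives
    fn = ((1 - pred) * target).sum(dim=1)      # false negatives
    dsc = (2 * tp / (2 * tp + fp + fn + eps)).mean().item()
    ppv = (tp / (tp + fp + eps)).mean().item()
    sen = (tp / (tp + fn + eps)).mean().item()
    return dsc, ppv, sen, batch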
Example #6
# The opening of this statement is truncated on this page; a plausible
# reconstruction, given the os.path.join/makedirs usage that follows:
directoryName = os.path.join(os.path.join(
    'レース分析', split_detail[0] + split_detail[1].zfill(2) + split_detail[2].zfill(2)), race + race_name)
os.makedirs(directoryName, exist_ok=True)
print(race)
race_level = getLevel(str(race_data.find(class_='RaceData02')))
with open(os.path.join(directoryName, textFileName), 'w') as f:
    f.write(link + '\n')
    f.write('\n'.join(link_list))
# parse the distance (m) and course type from the tail of the RaceData01 block
race_data01_class = str(race_data.find_all(class_='RaceData01')).split('m')[0]
distance = int(race_data01_class[-4:])
course = race_data01_class[-5]
horse_tag_list = race_data.find_all(class_='HorseName')
# horse_sex_list = race_data.find_all(class_='Barei Txt_C')
weight_list = race_data.select('td[class="Txt_C"]')
link = link.replace('shutuba', 'data_top')
average_pace_info = getAveragePaceInfo(directoryName, link_list, distance)
track_list = ['札幌', '函館', '福島', '新潟', '東京', '中山', '中京', '京都', '阪神', '小倉']
with open(os.path.join(directoryName, '平均指数表.csv'), 'w', newline='', encoding='cp932') as f:
    writer = csv.writer(f)
    writer.writerow(['馬名', '分析レース数', '平均トータル指数', '平均先行指数', '平均スタミナ指数', '平均上がり指数', '高速平均トータル指数', '中速平均トータル指数', '低速平均トータル指数',
                     '枠複勝率良', '種牡馬複勝率良', '枠複勝率稍', '種牡馬複勝率稍', '枠有利度重', '種牡馬有利度重', '枠有利度不', '種牡馬有利度不'])
for i, horse_tag in enumerate(horse_tag_list):
    link = getLink(horse_tag)
    horse_name = getTitle(horse_tag)
    if i != 0:
        # horse_sex = getRowElement(horse_sex_list[i - 1], 0)[1]
        horse_weight = getRowElement(weight_list[i - 1], 0)[1]
        num = getNum(i, len(horse_tag_list) - 1)
    # skip i == 0 (header row): num and horse_weight are undefined there
    if i != 0 and link[0] and horse_name[0]:
        analysis(directoryName, average_pace_info,
                 course, horse_name[1], link[1], track_list[int(trackId) - 1],
                 num, float(horse_weight), race_level)
Example #7
    data = AbileneMat['P']
    
    n = data.shape[0]    
    
    e_high = 0.98
    e_low = 0.96
    alpha = 0.96
    sci = -1

    holdOFF = 50

    # My version: pass the parameters defined above instead of re-hardcoding them
    res_me = FRAHST_V3(data, alpha=alpha, e_low=e_low, e_high=e_high, sci=sci,
                       holdOffTime=holdOFF, r=1, evalMetrics='F')

    metric_me, sets_me, anom_det_tab_me = analysis(res_me, AbileneMat['P_g_truth_tab_alt'], n)

    new_metrics_me = scoreEvaluation(n, res_me['anomalies'],
                                     AbileneMat['P_g_truth_tab_alt'], ignoreUpTo=400)
                                  
    # Pedro's version
    res_ped = frahst_pedro(data, r=1, alpha=alpha, energy_low=e_low, energy_high=e_high,
                           holdOffTime=holdOFF, evaluateTruth='FALSE')
    
    metric_ped, sets_ped, anom_det_tab_ped = analysis(res_ped, AbileneMat['P_g_truth_tab_alt'], n)
    
    new_metrics_ped = scoreEvaluation(n, res_ped['anomalies'],
                                      AbileneMat['P_g_truth_tab_alt'], ignoreUpTo=400)