コード例 #1
0
def test_get_metrics(capsys: CaptureFixture, recaptcha_site_key: str) -> None:
    """Verify get_metrics prints the bucket-count message for the site key."""
    get_metrics(
        project_id=GOOGLE_CLOUD_PROJECT,
        recaptcha_site_key=recaptcha_site_key,
    )
    captured = capsys.readouterr()
    expected = (
        f"Retrieved the bucket count for score based key: {recaptcha_site_key}"
    )
    assert re.search(expected, captured.out)
コード例 #2
0
ファイル: bottle_test.py プロジェクト: iyersv/TestRepo
def recipes_list():
    """Bottle route handler: render metrics for the nearest weather station.

    Resolves a ``(lat, lon)`` pair either directly from the query string or
    by geocoding a free-text ``place`` query parameter, finds neighboring
    stations, and renders the template with data from the first station
    that yields any metrics.

    Review fixes applied (behavior otherwise preserved):
    - normalized the original mixed tab/space indentation, which is a
      SyntaxError under Python 3;
    - converted Python 2 ``print x`` and ``dict.iteritems()`` to Python 3;
    - narrowed the bare ``except:`` to ``except Exception`` so that
      KeyboardInterrupt/SystemExit are no longer swallowed;
    - renamed the inner loop variables so ``k`` (the neighbor list being
      iterated) is no longer rebound mid-loop.
    """
    conf = helper_functions.get_config()
    place = request.query.place
    if place == '':
        # Coordinates supplied directly in the query string.
        lat = request.query.lat
        lon = request.query.long
    else:
        # Geocode the free-text place name; commas become spaces first.
        place = place.replace(',', ' ')
        lat, lon = helper_functions.get_lat_long(conf, place)
    lat_long = (lat, lon)

    validated = get_metrics.validate_inputs(lat_long)
    neighbors = get_metrics.get_neighbors(validated[0])

    # Keep stations whose reported distance is under 30 (units defined by
    # get_neighbors — presumably miles/km; confirm against that module).
    all_stations = [(row['station_id'], row['distance'])
                    for row in neighbors if row['distance'] < 30]

    all_data = []
    # Current year-month string, e.g. "2024-05" (loop-invariant, hoisted).
    current_month = datetime.datetime.strftime(datetime.datetime.now(), '%Y-%m')
    for row in neighbors:
        try:
            print(row['station_id'])
            v_station_id = row['station_id']
            v_distance = row['distance']
            metrics = get_metrics.get_metrics(row['station_id'], current_month)
            for metric_key, metric in metrics.items():
                all_data.append((metric_key,
                                 metric['metric_value'],
                                 metric['event_time']))
            # Render as soon as any station produces data.
            if all_data:
                return template('v', post=all_data, pos=lat_long,
                                station_id=v_station_id,
                                distance=v_distance,
                                all_stations=all_stations)
        except Exception:
            # Preserve original best-effort behavior: report and bail out.
            print('No Data Found', sys.exc_info()[0])
            return
コード例 #3
0
def combineImage(imgPath, roiPath):
    """
    Compress an image using its MS-ROI saliency map and report quality metrics.

    Parameters
    ----------
    imgPath : str
        Path to the original image.
    roiPath : str
        Path to the MS-ROI saliency map.

    Returns
    -------
    str, int, int, float, float
        compressed image path, compressed image size, uncompressed size,
        PSNR, SSIM
    """
    source_img = Image.open(imgPath)
    saliency_img = Image.open(roiPath)

    # Saliency-guided compression returns the new path plus both sizes.
    compressed_path, compressed_size, original_size = make_quality_compression(
        imgPath, roiPath, source_img, saliency_img)

    # Quality of the compressed result relative to the original.
    psnr_val, ssim_val = get_metrics(imgPath, compressed_path)

    return compressed_path, compressed_size, original_size, psnr_val, ssim_val
コード例 #4
0
ファイル: lab_cnn_rb.py プロジェクト: RameshOswal/autotrader
                else:
                    # Replay-buffer path: store this batch, then train on a
                    # sampled batch of size BSZ.
                    # NOTE(review): `reward` is unpacked but unused here —
                    # confirm that is intentional.
                    buffer.add(state=bTrainX, action=bTrainY)
                    state, reward, action = buffer.get_batch(bsz=BSZ)
                    loss = model.optimize(sess, state, action)

                    losses.append(loss)

            # End of one training epoch: report mean loss, then validate.
            print("Epoch {} Average Train Loss: {}, validating...".format(
                epoch, np.mean(losses)))
            losses = []
            allocation_wts = []
            price_change_vec = []
            for bEvalX, bEvalYFat in batch_gen.load_test():
                # Take the last time step as the eval target (assumes the
                # label tensor is (batch, time, assets) — TODO confirm).
                bEvalY = bEvalYFat[:, -1, :]
                pred_allocations = model.predict_allocation(sess, bEvalX)
                assert bEvalY.shape == pred_allocations.shape
                price_change_vec.append(bEvalY)
                allocation_wts.append(pred_allocations)

            # Stack the per-batch arrays into full test-set arrays.
            true_change_vec = np.concatenate(price_change_vec)
            allocation_wts = np.concatenate(allocation_wts)

            # Random-allocation baseline; not used within the visible lines —
            # presumably evaluated just below this fragment.
            random_alloc_wts = softmax(np.random.random(allocation_wts.shape))
            test_date = "_".join(batch_gen.dp.test_dates[IDX])
            m = get_metrics(dt_range=test_date)
            print("Our Policy:")
            # Evaluate portfolio value of the learned policy on the test range.
            m.apv_multiple_asset(true_change_vec,
                                 allocation_wts,
                                 get_graph=True,
                                 pv_0=INIT_PV,
                                 tag="epoch_{}".format(epoch))
コード例 #5
0
from glob import glob

# make the output directory to store the Q level images,
if not os.path.exists(args.output_directory):
    os.makedirs(args.output_directory)

# Import lazily: metrics are only needed when explicitly requested.
if args.print_metrics:
    from get_metrics import get_metrics

if args.single:
    # Single-image mode: compress one image using its saliency map.
    original = Image.open(args.image)
    sal = Image.open(args.map)
    a, b = make_quality_compression(original, sal)

    if args.print_metrics:
        get_metrics(args.image, a, b, original.size)

else:

    # Batch mode: select the glob pattern for the chosen dataset.
    if args.dataset == 'kodak':
        image_path = 'images/kodak/*.png'
    elif args.dataset == 'large':
        image_path = 'images/output_large/ori_*.png'
    else:
        # NOTE(review): `assert Exception(...)` always passes — the instance
        # is truthy — so invalid datasets are NOT rejected; this should be
        # `raise Exception(...)`. Also typo "choosen". Left unchanged in
        # this doc-only pass.
        assert Exception("Wrong dataset choosen")

    for image_file in glob(image_path):
        # Derive the saliency-map path from the image filename.
        if args.dataset == 'large':
            map_file = 'images/output_large/map' + image_file.split(
                '/')[-1][3:-4]
        elif args.dataset == 'kodak':
コード例 #6
0
ファイル: Main.py プロジェクト: dbaylies/MIRFinal
# Accumulator for F-measure across songs. The matching `precision` and
# `recall` accumulators are presumably initialized just above this
# fragment — not visible here.
f_measure = 0

for i in np.arange(len(files)):

    # Path to the i-th song of the selected album.
    filepath = audio_root + albums[album_num] + '/' + songs[i]

    # Segmentation pipeline: chromagram -> recurrence matrix -> time-lag
    # matrix -> Gaussian-smoothed matrix -> novelty curve.
    chromagram, fs_chromagram = get_chromagram.get_chromagram(filepath, plot)
    N, recurrence, look_back = recurrence_matrix.recurrence_matrix(chromagram,fs_chromagram, plot)
    L = timelag_matrix.timelag_matrix(N, recurrence, plot)
    P = gaussian_matrix.gaussian_matrix(fs_chromagram, L, plot)
    c = novelty.novelty(P, plot)

    # Get relevant array
    seg_times_i = seg_times[files[i]]

    # Pick boundary candidates from the novelty curve and score them.
    # NOTE(review): get_metrics receives `files` (the whole list), not
    # `files[i]` — confirm this matches its signature.
    onset_a, onset_t = peak_pick.peak_pick(c, fs_chromagram, look_back, seg_times_i, plot)
    precision_, recall_, f_measure_ = get_metrics.get_metrics(seg_times_i, files, onset_t)

    precision += precision_
    recall += recall_
    f_measure += f_measure_

# Average the accumulated scores over all songs.
num_songs = len(files)

precision_avg = precision/num_songs
recall_avg = recall/num_songs
f_measure_avg = f_measure/num_songs

print('For ' + albums[album_num])
print('\nP: ' + str(precision_avg) + '\nR: ' + str(recall_avg) + '\nF: ' + str(f_measure_avg) + '\n')
コード例 #7
0
# image_path = "BenchmarkIMAGES/BenchmarkIMAGES/*.jpg"
jpeg_compression_quality = 50

# Run both compression pipelines over every matched image and record
# metrics; `image` collects [index] rows used later as the plot x-axis.
count = 0
image = []
for count, image_file in enumerate(glob(image_path), start=1):
    image.append([count])
    out_name = compression_engine(image_file)
    jpeg_compression(image_file, jpeg_compression_quality)
    map_file = "msroi_map.jpg"
    original_image = image_file
    original = Image.open(original_image)
    get_metrics(original_image, "jpeg_compressed.jpg", out_name, original.size)

# Drop the final index entry — presumably so the x-axis length matches
# the CSV row count; verify against the metric writers.
image.pop()

# Load the PSNR series written out by the two pipelines.
jpeg_data = pd.read_csv('jpeg_psnr.csv')
jpeg_data_list = jpeg_data.values.tolist()

semantic_data = pd.read_csv('semantic_psnr.csv')
semantic_data_list = semantic_data.values.tolist()

# Plot plain JPEG (green) against semantic compression (red).
plt.plot(image, jpeg_data_list, color='g')
plt.plot(image, semantic_data_list, color='r')
plt.xlabel("Kodak Dataset Images")
plt.ylabel("JPEG vs Semantic")
plt.savefig("saved.png")
コード例 #8
0
    # Progress indicator for the current listing page.
    print('%0.2f%% done' % percent_done, '\r')

    # Collect all paper links on this "recent" listing page.
    all_papers = gm.get_paper_links(
        "http://www.biorxiv.org/content/early/recent?page=%d" % ipage)

    # NOTE(review): range(0, len(all_papers) - 1) skips the last paper on
    # the page — confirm whether this off-by-one is intentional.
    for ipapers in range(0, len(all_papers) - 1):

        #try:

        print("Browsing through paper #%d ..." % (ipapers + 1))

        # Metrics live on the ".article-metrics" view of the paper page.
        url = "http://www.biorxiv.org" + all_papers[ipapers][
            'link'] + ".article-metrics"

        # Skip papers where both metric fields came back as 'NA';
        # otherwise count the paper as processed.
        metrics = gm.get_metrics(url)
        if (metrics[0] == 'NA') and (metrics[1] == 'NA'):
            continue
        else:
            count = count + 1
            pass

        title = gm.get_title(url)

        citations = gm.get_citations(url, driver)

        # Randomized delay between papers to avoid hammering the server.
        sleep(random.randint(5, 30))

        #df = pd.DataFrame(np.array([1,2,3])[np.newaxis],index=[1],
        #columns=['a','b','c'])