def main():
    """Entry point: load computed statistics and render the JS/EN scatter plot by pairs."""
    from results import get_results

    stats, args = get_results()
    plot_JS_EN_scatter_by_pairs(stats, **vars(args))
    return 0
def result_page(year, *args, duration=False, **kwargs):
    """Return the JSON results payload for *year*, serving from cache when fresh.

    A lock serializes cache refreshes. If the lock cannot be acquired within
    10 seconds, or the backend fetch fails, the cached response is returned.

    Parameters
    ----------
    year : key into the per-duration cache and the backend query.
    duration : bool, selects which cache partition / query mode to use.
    """
    if lock.acquire(timeout=10):
        try:
            # FIX: timedelta.seconds wraps at one day (86400 s), so a
            # day-old cache entry looked "fresh" again; total_seconds()
            # gives the true elapsed time.
            if (datetime.now() - cache[duration][year]['ts']).total_seconds() < LIMIT:
                response = cache[duration][year]['data']
                logging.info('Refresh too soon, using cache')
            else:
                total = results.get_results(year=year, convert_ts=True,
                                            duration=duration)
                if total is None:  # something failed
                    logging.error('Failed retrieving data, using cache')
                    response = cache[duration][year]['data']
                else:
                    data = total.to_dict(orient='records')
                    columns = list(total.columns)
                    ts = datetime.now()
                    response = json.dumps(dict(data=data, columns=columns,
                                               ts=ts.strftime('%Y-%m-%d %H:%M')))
                    # pandas serializes missing values as NaN/NaT, which is
                    # not valid JSON — patch them into null / empty string.
                    response = response.replace('NaN', 'null')
                    response = response.replace('NaT', '')
                    cache[duration][year]['data'] = response
                    cache[duration][year]['ts'] = ts
        finally:
            # FIX: release in finally so an exception during the refresh
            # cannot leak the lock and deadlock every later request.
            lock.release()
    else:
        logging.warning("Can't acquire lock, returning cached response")
        response = cache[duration][year]['data']
    return response
def main():
    """Demo: corrupt a grayscale image with salt & pepper noise, then restore it
    with repeated median filtering, saving per-pass comparisons to disk."""
    # Read the local test image in grayscale and add salt & pepper noise.
    original = cv2.imread('image2.tiff', cv2.IMREAD_GRAYSCALE)
    noisy = add_sp_noise(original, prob_salt=0.2, prob_pepper=0.2)

    cv2.imshow('Original Image', original)
    cv2.imshow('Noisy Image', noisy)

    num_passes = 5  # number of median-filter passes

    # Create a timestamped results directory for this run.
    timestamp = datetime.datetime.now().strftime('%Y-%m-%d_%H-%M-%S')
    if not os.path.exists('results/'):
        os.mkdir('results/')
    os.mkdir('results/' + timestamp)

    # Median-filter repeatedly: pass 1 works on the noisy image, each later
    # pass works on the previous pass's output.
    restored = noisy
    for i in range(num_passes):
        restored = median_filter(restored)
        # Mean square error and difference image vs. the clean original.
        mse, diff = compare_images(original, restored)
        get_results(original, noisy, restored, diff, mse,
                    'Pass %d' % (i + 1),
                    'results/' + timestamp + '/pass_%d.png' % (i + 1))

    cv2.imshow('Restored: Pass-%d' % num_passes, restored)
    cv2.imshow('Difference', diff)
    cv2.waitKey(0)         # press any key to terminate the windows
    cv2.destroyAllWindows()
def send_results(update, context):
    """Record the search keyword and result pages in user_data, then reply
    with the first page and its keyboard markup."""
    user_data = context.user_data
    user_data["keyword"] = update.message.text
    user_data["chat_id"] = update.message.chat_id

    pages, keyboards = get_results(update, context)
    user_data["msgs"] = pages
    user_data["markups"] = keyboards
    user_data["current_page"] = 0

    update.message.reply_text(pages[0], reply_markup=keyboards[0])
def results():
    """Flask endpoint: look up the Twitter user given by ?q= and return its
    results as JSON; returns {'status': 0} on any failure."""
    auth = tweepy.OAuthHandler(config.consumer_key, config.consumer_secret)
    auth.set_access_token(config.access_token, config.access_token_secret)
    api = tweepy.API(auth)
    try:
        user = request.args.get('q')
        return_json = jsonify(get_results(api, user))
    # FIX: the original bare `except:` also swallowed SystemExit and
    # KeyboardInterrupt; Exception still covers every API/lookup error.
    except Exception:
        return (jsonify({'status': 0}))  # 0 error, 1 passes
    return (return_json)
def test_get_results():
    '''Check that there are nfiles results, all successful.'''
    run_empty()
    outcome = get_results('empty.mutants', 'empty.results')
    assert len(outcome) == nfiles
    # Every result must be a 'silent' mutation with exit status '0',
    # and the positions must cover 0..nfiles-1 exactly once.
    successes = [1 for entry in outcome
                 if entry[1] == 'silent' and entry[2] == '0']
    assert len(successes) == nfiles
    assert {entry[0] for entry in outcome} == set(range(nfiles))
    shutil.rmtree('empty.mutants')
    os.remove('empty.results')
def list_results():
    """Open a Toplevel window showing the results text with an Okay button."""
    win = Toplevel()
    win.wm_title("resultslist")
    txt = scrolledtext.ScrolledText(win, width=20, height=40, undo=True)
    txt['font'] = ('consolas', '12')
    # FIX: the original called both pack() and grid() on the same widget.
    # Mixing geometry managers in one container makes Tk's managers fight
    # over the layout (documented Tk hazard — can hang the app). grid()
    # alone matches the Okay button below; sticky + weights preserve the
    # expand/fill intent of the removed pack() call.
    txt.grid(row=0, column=0, sticky='nsew')
    win.rowconfigure(0, weight=1)
    win.columnconfigure(0, weight=1)
    txt.insert(1.0, results.get_results())
    b = Button(win, text="Okay", command=win.destroy)
    b.grid(row=1, column=0)
def get_results_request():
    """Handler: serialize the current results to a JSON string."""
    return json.dumps(get_results())
# Clean Survey Results import clean_last new_drivers = clean_last.clean_last_qns(data) import clean_others new_df, new_features = clean_others.clean(data) # Note: i_3 and o_3 are dropped for model improvements new_features = new_features.drop(["i_3", "o_3"], axis=1) # Load Prediction Model rf = pickle.load(open("rf.sav", 'rb')) # Execute Prediction Model on New Survey Results import results new_results_individual, new_results_department, new_results_job_level, new_results_age, new_results_organisation = results.get_results( rf, new_df, new_features) # Generate Report for Frontend & Storing to Database import report report_type_3_age, report_type_3_job_level, report_type_3_department, report_type_4_wellbeing, report_type_4_opinions, report_type_4_personality, report_type_4_core_values, report_type_5 = report.gen_report( new_results_individual, new_results_age, new_results_job_level, new_results_department) # Convert Index to Column for Storage in MongoDB as unique identifier new_results_age['Age Category'] = new_results_age.index new_results_department['Department'] = new_results_department.index new_results_job_level['Job Level'] = new_results_job_level.index import data_upload data_upload.upload(report_type_4_wellbeing, report_type_4_opinions, report_type_4_personality, report_type_4_core_values,
def results_to_list(loci, mutation='point'):
    '''return results as list'''
    run_screen(loci, mutation)
    # File names are derived from the mutation type chosen above.
    return get_results(mutation + '.mutants', mutation + '.results')
def local_endpoint():
    """Fetch and return results from the configured local API endpoint."""
    return get_results(API_URI)
def main():
    """Entry point: load computed statistics and render the ENS hexbin plot."""
    from results import get_results

    stats, args = get_results()
    plot_ENS_hexbin(stats, **vars(args))  # vb_two
    # Consistency: the sibling plotting entry points return 0 on success.
    return 0
def main():
    """Entry point: fetch computed statistics and draw the bar plot."""
    stats, args = get_results()
    plot_bar(stats, **vars(args))
    return 0
def main():
    """Entry point: load computed statistics and render the LRT histograms."""
    from results import get_results

    stats, args = get_results()
    plot_lrt_histograms(stats, **vars(args))
    # Consistency: the sibling plotting entry points return 0 on success.
    return 0