def run(self):
    # Score the N-terminal window: features over the first 30 residues.
    features = functions.features(self.seq[:30])
    self.s_scores = functions.s_score(features)
    # C-score scan over the full sequence for candidate cleavage sites.
    self.c_scores, self.cs_probs_all, self.cleavage_sites = functions.c_score(
        self.seq, max_scan=self.max_scan)
    # Y-score: elementwise geometric mean of the S- and C-scores.
    self.y_scores = functions.np.sqrt(self.s_scores * self.c_scores)
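# The Y-score above is the elementwise geometric mean of the S- and C-scores,
# reminiscent of SignalP-style combined scores. A minimal standalone sketch
# with toy arrays (the real scores come from functions.s_score / functions.c_score):
import numpy as np

s_scores = np.array([0.9, 0.7, 0.2])     # toy per-position signal scores
c_scores = np.array([0.8, 0.1, 0.4])     # toy per-position cleavage scores
y_scores = np.sqrt(s_scores * c_scores)  # geometric mean, as in run() above
print(y_scores)                          # [0.8485... 0.2645... 0.2828...]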
import time

from flask import render_template, request

# fu is the project's helper module (rcommentLink, features, probability, ...),
# imported elsewhere in the app.


def test_output():
    # Fetch the submission named in the query string via the helper module.
    post_URL = request.args.get("postURL")
    eg_subreddit = fu.subreddit(post_URL)
    postAtt = fu.rcommentLink(post_URL)
    eg_postTitle = postAtt.title

    # Use the first comment on the post as the worked example.
    first_comment = postAtt.comments[0]
    eg_comment = first_comment.body
    commentCreated = int(first_comment.created_utc)
    postCreated = postAtt.created_utc

    # Human-readable time deltas: post-to-comment and comment-to-now.
    timeDiffC = fu.timePresent(round(commentCreated - postCreated))
    timeDiffCur = fu.timePresent(round(time.time() - commentCreated))

    comLength = fu.cLength(fu.comLength(eg_comment))
    sentiment = fu.sentiment(eg_comment)

    # Build the feature vector and score it with the trained model.
    features = fu.features(eg_subreddit, post_URL, eg_comment, commentCreated)
    eg_prob = fu.probability(features)
    eg_score = first_comment.score
    eg_commentForm = fu.commentSent(eg_comment)

    return render_template(
        "testOutput.html",
        eg_postTitle=eg_postTitle,
        eg_commentForm=eg_commentForm,
        eg_score=eg_score,
        eg_prob=eg_prob,
        sentiment=sentiment,
        comLength=comLength,
        timeDiffCur=timeDiffCur,
        timeDiffC=timeDiffC,
        eg_comment=eg_comment,
        post_URL=post_URL,
        eg_subreddit=eg_subreddit,
    )
def output():
    # Read the post URL and the user's draft comment from the query string.
    post = request.args.get("post")
    comment = request.args.get("comment")

    postAtt = fu.rcommentLink(post)
    postTitle = postAtt.title
    the_subreddit = fu.subreddit(post)
    the_post = post
    your_comment = comment

    # Build the feature vector for the comment as if posted right now.
    features = fu.features(the_subreddit, post, comment, time.time())
    timePost = fu.timePresent(features[4])
    comLength = fu.cLength(features[5])
    sentiment = fu.sentiment(your_comment)
    prob = fu.probability(features)
    commentForm = fu.commentSent(your_comment)

    return render_template(
        "output.html",
        postTitle=postTitle,
        commentForm=commentForm,
        prob=prob,
        timePost=timePost,
        comLength=comLength,
        sentiment=sentiment,
        the_subreddit=the_subreddit,
        the_post=the_post,
        your_comment=your_comment,
    )
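# Both view functions above read their inputs from the query string and render
# a template. A minimal self-contained sketch of the same request-handling
# pattern; the route path, app name, and response body are assumptions, since
# the source does not show how these views are registered:
from flask import Flask, request

app = Flask(__name__)


@app.route("/output")  # hypothetical path
def output_demo():
    post = request.args.get("post", "")
    comment = request.args.get("comment", "")
    return "post=%r comment=%r" % (post, comment)


if __name__ == "__main__":
    # e.g. GET /output?post=<reddit-url>&comment=nice+post
    app.run(debug=True)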
print "\n******* Analysis started", datetime.now()\ .strftime("%A, %d. %B %Y %I:%M%p"), "*******\n" print "Running GoShifter with following parameters:\n" for arg in args: print "\t", arg, args[arg] print "\n" args = validate_args(args) ### read pheno mappings snpInfoChr = data.readSnpMapByChr(args['--snpmap']) #### find median annotation size to expand LD boundries if args['--ld-extend'] == "False": expand = functions.features(args['--annotation']) * 2 else: expand = args['--ld-extend'] ### read peaks as interval tree peaksTree = functions.intervalTree(args['--annotation']) ### test for enrichment with peak shifting (random shift) functions.enrichPermRandBoundryPeakShift_tabixLd( snpInfoChr, args['--ld'], args['--rsquared'], args['--window'], expand, peaksTree, args['--min-shift'], args['--max-shift'], args['--permute'], args['--out'], args['--no-ld'], args['--proxies']) print "\n******* Analysis ended", datetime\ .now().strftime("%A, %d. %B %Y %I:%M%p"), "*******\n"
print "\n******* Analysis started", datetime.now()\
    .strftime("%A, %d. %B %Y %I:%M%p"), "*******\n"
print "Running GoShifter with the following parameters:\n"
for arg in args:
    print "\t", arg, args[arg]
print "\n"

args = validate_args(args)

### read pheno mappings
snpInfoChr = data.readSnpMapByChr(args['--snpmap'])

### find median annotation size to expand LD boundaries
if args['--ld-extend'] == "False":
    expand = functions.features(args['--annotation-a']) * 2
else:
    expand = args['--ld-extend']

### merge overlapping annotation sites from A, then convert to an interval tree
a_mergePeaks = functions.mergeTree(args['--annotation-a'])
a_tree = functions.merge2IntervalTree(a_mergePeaks)

### merge overlapping annotation sites from B, then convert to an interval tree
b_mergePeaks = functions.mergeTree(args['--annotation-b'])
b_tree = functions.merge2IntervalTree(b_mergePeaks)

### test for enrichment with peak shifting (random shift)
functions.enrich_shift_conditional_tabixLd(snpInfoChr, args['--ld'],\
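# Both GoShifter drivers above collapse overlapping annotation intervals before
# building interval trees (functions.mergeTree / functions.merge2IntervalTree).
# A minimal standalone sketch of that merge step, assuming closed (start, end)
# intervals; this is an illustration, not GoShifter's actual implementation:
def merge_intervals(intervals):
    # Sort by start; extend the last merged interval whenever the next overlaps it.
    merged = []
    for start, end in sorted(intervals):
        if merged and start <= merged[-1][1]:
            merged[-1] = (merged[-1][0], max(merged[-1][1], end))
        else:
            merged.append((start, end))
    return merged

print(merge_intervals([(1, 5), (4, 8), (10, 12)]))  # [(1, 8), (10, 12)]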
# p_theme = {'color_1': '#ABABAB', 'color_2': '#ABABAB', 'color_3': '#ABABAB',
#            'font_color_1': '#ABABAB', 'font_size_1': 12, 'font_size_2': 16}
# p_dims = {'width': 1450, 'height': 800}
# p_vlines = [data['timestamp'].head(1), data['timestamp'].tail(1)]
# p_labels = {'title': 'Main title', 'x_title': 'x axis title', 'y_title': 'y axis title'}

# call the plotting function imported from the visualizations GIST
# ohlc = vs['g_ohlc'](p_ohlc=data, p_theme=p_theme, p_dims=p_dims, p_vlines=p_vlines, p_labels=p_labels)

# show the plot
# ohlc.show()

# -- ------------------------------------------------------------------------------- Feature Engineering -- #

# Feature engineering (autoregressive and hadamard functions)
data_t, data_y, data_x, features_names = fn.features(p_data=all_data, p_nmax=7)

# Data scaling
data_x = fn.data_trans(p_data=data_x, p_trans='Rubust')

# Variable selection and search over the training set
var = fn.variables(data_y, data_x, N=4)
busqueda = fn.busqueda_en_train(var, n=4)

# Rearrange data for the classification model
data_cla = {'x_data': data_x, 'y_data': data_y['co_d']}

# -- ---------------------------------------------------------------------------------- Feature analysis -- #

# correlation matrix
cor_mat = data_x.corr()
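# fn.features above reportedly builds autoregressive (lagged) and hadamard
# (elementwise-product) features up to p_nmax lags. A minimal pandas sketch of
# the idea; the function and column names here are illustrative, not the
# project's actual implementation:
import pandas as pd

def toy_lag_features(series, n_max):
    # Lagged copies of the series plus one hadamard (elementwise product) term.
    out = pd.DataFrame({'y': series})
    for lag in range(1, n_max + 1):
        out['lag_%d' % lag] = series.shift(lag)
    out['lag1_x_lag2'] = out['lag_1'] * out['lag_2']
    return out.dropna()

prices = pd.Series([1.0, 1.1, 1.3, 1.2, 1.4, 1.5])
print(toy_lag_features(prices, n_max=2))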