Example #1
def scrubemail_filter(data, css_junk=''):
    return Markup(
        scrubemail(unicode(bleach.linkify(bleach.clean(data))),
                   rot13=True,
                   css_junk=css_junk))
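This example is written as a Jinja filter. A minimal sketch of how such a filter might be wired up on a Flask app (the app object and filter name here are assumptions, not part of the original project):

from flask import Flask

app = Flask(__name__)
# hypothetical registration; afterwards templates can use {{ text|scrubemail }}
app.jinja_env.filters['scrubemail'] = scrubemail_filter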
Example #2
def googlemap(*args, **kwargs):
    map = googlemap_obj(*args, **kwargs)
    return Markup("".join((map.js, map.html)))
Example #3
 def index():
     script = server_document('http://localhost:5006/bok')
     return render_template('index.html',
                            bokeh_script=Markup(script),
                            options=plant_data)
Example #4
def contact():
    name = "contact"
    content = Markup(markdown.markdown(open(settings.APP_PATH + "/contact/contact.md").read()))
    return render_template("contact/contact.html", **locals())
Example #5
 def js(self):
     return Markup(self.render('googlemaps/gmapjs.html', gmap=self))
Example #6
    def __repr__(self):
        doc_param = "-".join([str(x) for x in [self.unit, self.materialclass, self.doctype]])

        return ('[ ' + str(self.quantity) + ' ] ' + doc_param +
                ' by ' + str(self.created_by) + ' on ' + Markup(self.created_on))
Example #7
def home():
    name = "home"
    content = Markup(markdown.markdown(open(settings.APP_PATH + "/home/home.md").read()))
    return render_template("home/home.html", **locals())
Example #8
class TableColumnInlineView(CompactCRUDMixin, SupersetModelView):  # noqa
    datamodel = SQLAInterface(models.TableColumn)

    list_title = _('List Columns')
    show_title = _('Show Column')
    add_title = _('Add Column')
    edit_title = _('Edit Column')

    can_delete = False
    list_widget = ListWidgetWithCheckboxes
    edit_columns = [
        'column_name', 'verbose_name', 'description', 'type', 'groupby',
        'filterable', 'table', 'expression', 'is_dttm', 'python_date_format',
        'database_expression'
    ]
    add_columns = edit_columns
    list_columns = [
        'column_name', 'verbose_name', 'type', 'groupby', 'filterable',
        'is_dttm'
    ]
    page_size = 500
    description_columns = {
        'is_dttm':
        _('Whether to make this column available as a '
          '[Time Granularity] option; the column has to be DATETIME or '
          'DATETIME-like'),
        'filterable':
        _('Whether this column is exposed in the `Filters` section '
          'of the explore view.'),
        'type':
        _('The data type that was inferred by the database. '
          'It may be necessary to input a type manually for '
          'expression-defined columns in some cases. In most cases '
          'users should not need to alter this.'),
        'expression':
        utils.markdown(
            'a valid, *non-aggregating* SQL expression as supported by the '
            'underlying backend. Example: `substr(name, 1, 1)`', True),
        'python_date_format':
        utils.markdown(
            Markup(
                'The pattern of timestamp format, use '
                '<a href="https://docs.python.org/2/library/'
                'datetime.html#strftime-strptime-behavior">'
                'python datetime string pattern</a> '
                'expression. If time is stored in epoch '
                'format, put `epoch_s` or `epoch_ms`. Leave `Database Expression` '
                'below empty if timestamp is stored in '
                'String or Integer(epoch) type'), True),
        'database_expression':
        utils.markdown(
            'The database expression to cast internal datetime '
            'constants to database date/timestamp type according to the DBAPI. '
            'The expression should follow the pattern of '
            '%Y-%m-%d %H:%M:%S, based on different DBAPI. '
            'The string should be a Python string formatter \n'
            "`Ex: TO_DATE('{}', 'YYYY-MM-DD HH24:MI:SS')` for Oracle. "
            'Superset uses a default expression based on the DB URI if this '
            'field is blank.', True),
    }
    label_columns = {
        'column_name': _('Column'),
        'verbose_name': _('Verbose Name'),
        'description': _('Description'),
        'groupby': _('Groupable'),
        'filterable': _('Filterable'),
        'table': _('Table'),
        'expression': _('Expression'),
        'is_dttm': _('Is temporal'),
        'python_date_format': _('Datetime Format'),
        'database_expression': _('Database Expression'),
        'type': _('Type'),
    }
Example #9
 def link(self):
     name = escape(self.datasource_name)
     return Markup('<a href="{self.url}">{name}</a>').format(**locals())
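Note that Markup.format() escapes the values interpolated into it, so the already-escaped name and self.url stay safe. A small sketch of that behavior using markupsafe directly (the names here are illustrative):

from markupsafe import Markup

link = Markup('<a href="{url}">{name}</a>').format(url='/d/1', name='<b>sales</b>')
# format() escapes its arguments:
# '<a href="/d/1">&lt;b&gt;sales&lt;/b&gt;</a>'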
Example #10
def Tweets_Results():
	_keyword = request.form['topic']
	_number = request.form['tweets']
	_number = int(_number)
	# NOTE: credentials redacted; supply your own Twitter API keys here
	consumer_key = 'YOUR_CONSUMER_KEY'
	consumer_secret = 'YOUR_CONSUMER_SECRET'
	access_token = 'YOUR_ACCESS_TOKEN'
	access_token_secret = 'YOUR_ACCESS_TOKEN_SECRET'
	auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
	auth.set_access_token(access_token, access_token_secret)
	api = tweepy.API(auth)
	user = api.me()
	print(user.name)
	results = api.search(
		lang="en",
		q=_keyword + " -rt",
		count=_number,
		result_type="recent"
		)
	tweets = []
	for i in results:
		tweets.append(i.text)
	polarity_list = []
	numbers_list = []
	number = 1
	for i in tweets:
		analysis = TextBlob(i)
		fanalysis = analysis.sentiment
		polarity = fanalysis.polarity
		polarity_list.append(polarity)
		numbers_list.append(number)
		number = number + 1

	averagePolarity = (sum(polarity_list))/(len(polarity_list))
	averagePolarity = "{0:.0f}%".format(averagePolarity * 100)
	time  = datetime.now().strftime("At: %H:%M\nOn: %m-%d-%y")

	#plotly
	my_plot_div = plot([Scatter(x=numbers_list, y=polarity_list,mode = "markers",text = tweets, marker = dict(size = 10))], output_type='div')
	print(_keyword,_number,averagePolarity)

	comment_words = ' '
	stopwords = set(STOPWORDS)
	stopwords.add("https")
	stopwords.add(_keyword)
	stopwords.add("co")
	for val in tweets:
		val = str(val)
		# split the value into tokens
		tokens = val.split()
		# convert each token to lowercase
		for i in range(len(tokens)):
			tokens[i] = tokens[i].lower()
		for words in tokens:
			comment_words = comment_words + words + ' '

	mask = np.array(Image.open(requests.get('https://c7.uihere.com/files/354/747/724/logo-united-states-presidential-election-debates-2016-icon-twitter-png-image-thumb.jpg', stream=True).raw))

	wordcloud = WordCloud(width = 800, height = 800,background_color ='white',stopwords = stopwords,min_font_size = 10, mask=mask).generate(comment_words)

	plt.figure(figsize=(10,8),facecolor = 'white', edgecolor='blue')
	plt.imshow(wordcloud)
	plt.axis('off')
	plt.tight_layout(pad=0)

	plt.savefig("C:/Users/Aniket/Desktop/Aegis School of Business/Python/Evaluation/flask_pro/static/total_wordcloud.png")

	df = pd.DataFrame({"tweets":tweets,"polarity":polarity_list})
	a = df.loc[df["polarity"]<0]
	comment_words = ' '
	stopwords = set(STOPWORDS)
	stopwords.add("https")
	stopwords.add(_keyword)
	stopwords.add("co")

	for val in a["tweets"]:
	   val = str(val)
	   # split the value
	   tokens = val.split()

	        # Converts each token into lowercase
	   for i in range(len(tokens)):
	      tokens[i] = tokens[i].lower()

	   for words in tokens:
	      comment_words = comment_words + words + ' '

	mask = np.array(Image.open(requests.get('https://c7.uihere.com/files/354/747/724/logo-united-states-presidential-election-debates-2016-icon-twitter-png-image-thumb.jpg', stream=True).raw))

	wordcloud = WordCloud(width = 800, height = 800,background_color ='white',stopwords = stopwords,min_font_size = 10, mask=mask).generate(comment_words)

	plt.figure(figsize=(10,8),facecolor = 'white', edgecolor='blue')
	plt.imshow(wordcloud)
	plt.axis('off')
	plt.tight_layout(pad=0)

	plt.savefig("C:/Users/Aniket/Desktop/Aegis School of Business/Python/Evaluation/flask_pro/static/negative_cloud.png")

	tfidf_vectorizer = TfidfVectorizer(min_df=1,stop_words="english")

	simi = []
	b = list(a["tweets"])
	p = list(a["polarity"])
	n = []
	for i in range(0,len(b)):
		n.append(i)

	for i in range(0,len(b)):

		subsimi = []
		for j in range(0,len(b)):

			train_set = [b[i],b[j]]
			tfidf_mat = tfidf_vectorizer.fit_transform(train_set)
			a = cosine_similarity(tfidf_mat[0:1],tfidf_mat)
			subsimi.append(a[0][1])

		simi.append(subsimi)
	dis = np.array(simi)
	dis = np.round(dis,2)
	diss = 1- dis
	similarity = diss
	graph = similarity.copy()

	model = AgglomerativeClustering(affinity="precomputed", n_clusters = 3, linkage="complete").fit(graph)
	print(model.labels_)
	clust = model.labels_

	my_plot_div2 = plot([Scatter(x=n, y=p,mode = "markers",text = b, marker = dict(size = 10, color = clust,colorscale='Viridis',showscale=True))], output_type='div')
	return render_template("Tweets_Results.html", T = time, ASentiment = averagePolarity,total = _number, topic = _keyword, div_placeholder=Markup(my_plot_div),second_plot = Markup(my_plot_div2))
Example #11
class TableModelView(DatasourceModelView, DeleteMixin,
                     YamlExportMixin):  # noqa
    datamodel = SQLAInterface(models.SqlaTable)

    list_title = _('List Tables')
    show_title = _('Show Table')
    add_title = _('Add Table')
    edit_title = _('Edit Table')

    list_columns = ['link', 'database', 'changed_by_', 'modified']
    order_columns = ['modified']
    add_columns = ['database', 'schema', 'table_name']
    edit_columns = [
        'table_name',
        'sql',
        'filter_select_enabled',
        'slices',
        'fetch_values_predicate',
        'database',
        'schema',
        'description',
        'owner',
        'main_dttm_col',
        'default_endpoint',
        'offset',
        'cache_timeout',
        'is_sqllab_view',
        'template_params',
    ]
    base_filters = [['id', DatasourceFilter, lambda: []]]
    show_columns = edit_columns + ['perm']
    related_views = [TableColumnInlineView, SqlMetricInlineView]
    base_order = ('changed_on', 'desc')
    search_columns = (
        'database',
        'schema',
        'table_name',
        'owner',
        'is_sqllab_view',
    )
    description_columns = {
        'slices':
        _('The list of charts associated with this table. By '
          'altering this datasource, you may change how these associated '
          'charts behave. '
          'Also note that charts need to point to a datasource, so '
          'this form will fail at saving if removing charts from a '
          'datasource. If you want to change the datasource for a chart, '
          "overwrite the chart from the 'explore view'"),
        'offset':
        _('Timezone offset (in hours) for this datasource'),
        'table_name':
        _('Name of the table that exists in the source database'),
        'schema':
        _('Schema, as used only in some databases like Postgres, Redshift '
          'and DB2'),
        'description':
        Markup(
            'Supports <a href="https://daringfireball.net/projects/markdown/">'
            'markdown</a>'),
        'sql':
        _(
            'This field acts as a Superset view, meaning that Superset will '
            'run a query against this string as a subquery.', ),
        'fetch_values_predicate':
        _(
            'Predicate applied when fetching distinct value to '
            'populate the filter control component. Supports '
            'jinja template syntax. Applies only when '
            '`Enable Filter Select` is on.', ),
        'default_endpoint':
        _('Redirects to this endpoint when clicking on the table '
          'from the table list'),
        'filter_select_enabled':
        _("Whether to populate the filter's dropdown in the explore "
          "view's filter section with a list of distinct values fetched "
          'from the backend on the fly'),
        'is_sqllab_view':
        _("Whether the table was generated by the 'Visualize' flow "
          'in SQL Lab'),
        'template_params':
        _('A set of parameters that become available in the query using '
          'Jinja templating syntax'),
    }
    label_columns = {
        'slices': _('Associated Charts'),
        'link': _('Table'),
        'changed_by_': _('Changed By'),
        'database': _('Database'),
        'changed_on_': _('Last Changed'),
        'filter_select_enabled': _('Enable Filter Select'),
        'schema': _('Schema'),
        'default_endpoint': _('Default Endpoint'),
        'offset': _('Offset'),
        'cache_timeout': _('Cache Timeout'),
        'table_name': _('Table Name'),
        'fetch_values_predicate': _('Fetch Values Predicate'),
        'owner': _('Owner'),
        'main_dttm_col': _('Main Datetime Column'),
        'description': _('Description'),
        'is_sqllab_view': _('SQL Lab View'),
        'template_params': _('Template parameters'),
    }

    def pre_add(self, table):
        with db.session.no_autoflush:
            table_query = db.session.query(models.SqlaTable).filter(
                models.SqlaTable.table_name == table.table_name,
                models.SqlaTable.schema == table.schema,
                models.SqlaTable.database_id == table.database.id)
            if db.session.query(table_query.exists()).scalar():
                raise Exception(get_datasource_exist_error_mgs(
                    table.full_name))

        # Fail before adding if the table can't be found
        try:
            table.get_sqla_table_object()
        except Exception:
            raise Exception(
                _('Table [{}] could not be found, '
                  'please double check your '
                  'database connection, schema, and '
                  'table name').format(table.name))

    def post_add(self, table, flash_message=True):
        table.fetch_metadata()
        security_manager.merge_perm('datasource_access', table.get_perm())
        if table.schema:
            security_manager.merge_perm('schema_access', table.schema_perm)

        if flash_message:
            flash(
                _('The table was created. '
                  'As part of this two phase configuration '
                  'process, you should now click the edit button by '
                  'the new table to configure it.'), 'info')

    def post_update(self, table):
        self.post_add(table, flash_message=False)

    def _delete(self, pk):
        DeleteMixin._delete(self, pk)

    @expose('/edit/<pk>', methods=['GET', 'POST'])
    @has_access
    def edit(self, pk):
        """Simple hack to redirect to explore view after saving"""
        resp = super(TableModelView, self).edit(pk)
        if isinstance(resp, basestring):
            return resp
        return redirect('/superset/explore/table/{}/'.format(pk))

    @action('refresh', __('Refresh Metadata'), __('Refresh column metadata'),
            'fa-refresh')
    def refresh(self, tables):
        if not isinstance(tables, list):
            tables = [tables]
        for t in tables:
            t.fetch_metadata()
        msg = _('Metadata refreshed for the following table(s): %(tables)s',
                tables=', '.join([t.table_name for t in tables]))
        flash(msg, 'info')
        return redirect('/tablemodelview/list/')
Example #12
def Course_Results():
	_courseName = request.form['course']
	df = pd.read_excel('C:/Users/Aniket/Desktop/Aegis School of Business/Python/Evaluation/flask_pro/static/output.xlsx')
	df.head()
	review_lst = []
	s_polarity = []
	n_polarity = []
	positive_reviews_lst = []
	negative_reviews_lst = []
	positive_reviews = 0
	negative_reviews = 0
	neutral_reviews = 0
	course_input = _courseName
	course_input = course_input.lower()
	df= df.applymap(lambda s:s.lower() if type(s) == str else s)
	df = df.set_index("Course_name", drop = False)
	try:
		if df['Course_name'].str.contains(str(course_input)).any():
		    print("Course is available")
		    print(course_input)
		    is_course_in_name = df['Course_name'].str.contains(str(course_input))
		    course_in_name = df[is_course_in_name]
		    #print(course_in_name.head())
		    course_lst = list((course_in_name.loc[:,"Course_name"]))
		    print(course_lst)
		    url = (course_in_name.loc[str(course_input),"course_url"])
		    review_no = (course_in_name.loc[str(course_input),"review_nos"])
		    print(url)
		    print(review_no)
		    if review_no == 0:
		    	print("No Reviews Available")
		    	return render_template("Course_Analysis.html", Course_header = 'No Reviews Available')
		    else:
		    	try:
		    		page = requests.get(url)
		    		soup = bs4.BeautifulSoup(page.text,'html.parser')
		    		result = soup.findAll("div",{"class":"review-body__content"})
		    	except:
		    		return "Please Check Internet Connection"

		    	for res in result:
		    		review = res.find("span",{"class":"more-less-trigger__text--full"}).text.strip()
		    		review_lst.append(review)
		    		#print(review_lst)
		    		#print(len(review_lst))
		    	for page_no in range(2,10):
		    		if len(review_lst) == int(review_no):
		    			break
		    		p_url = str(url)+"?page="+str(page_no)+"#incourse-reviews"
		    		page = requests.get(p_url)
		    		soup = bs4.BeautifulSoup(page.text,'html.parser')
		    		result = soup.findAll("div",{"class":"review-body__content"})
		    		for res in result:
		    			review = res.find("span",{"class":"more-less-trigger__text--full"}).text.strip()
		    			review_lst.append(review)

		    	print(len(review_lst))
		    	#print(review_lst)
		    	for r in range(0,len(review_lst)):
		    		analysis = TextBlob(review_lst[r])
		    		s_polarity.append(analysis.sentiment.polarity)
		    		#if analysis.detect_language() == 'en':
		    		#print(analysis.sentiment)
		    		#print(analysis.sentiment.polarity)
		    		if analysis.sentiment[0]>0:
		    			positive_reviews = positive_reviews + 1
		    			positive_reviews_lst.append(review_lst[r])
		    		elif analysis.sentiment[0]<0:
		    			negative_reviews = negative_reviews + 1
		    			negative_reviews_lst.append(review_lst[r])
		    			n_polarity.append(analysis.sentiment.polarity)
		    		else:
		    			neutral_reviews = neutral_reviews + 1
		    		total_reviews = positive_reviews + negative_reviews + neutral_reviews
		else:
			print("Course is not available")
		def Average(lst):
			return sum(lst) / len(lst)
		sentiment_polarity = round(Average(s_polarity),2)
		print("Total Reviews = " + str(total_reviews))
		print("Positive Reviews = " + str(positive_reviews))
		print("Negative Reviews = " + str(negative_reviews))
		print("Neutral Reviews = " + str(neutral_reviews))
		print("Sentiment_Polarity = " + str(round(sentiment_polarity,2)))

		# Scatter plot
		plt.figure(figsize = (4.5, 4.5), facecolor = None)
		x = range(0, total_reviews)
		y = s_polarity
		colors = ("blue")
		plt.scatter(x, y, c=colors, alpha=0.8)
		plt.title('Scatter plot of Course Reviews')
		plt.xlabel('No of Reviews')
		plt.ylabel('Sentiment Polarity')
		plt.axhline(0, color='green')
		plt.savefig("C:/Users/Aniket/Desktop/Aegis School of Business/Python/Evaluation/flask_pro/static/course_scatter.png")

		# Pie Chart
		#plt.figure(figsize = (5, 5), facecolor = None)
		labels = ['positive', 'negative', 'neutral']
		sizes = [positive_reviews, negative_reviews, neutral_reviews]
		#colors = ['yellowgreen', 'lightcoral', 'lightskyblue']
		#explode = (0, 0, 0)
		#plt.pie(sizes, explode=explode, labels=labels, colors=colors, autopct='%1.1f%%', shadow=True, startangle=140)
		#plt.axis('equal')
		#plt.savefig("C:/Users/Aniket/Desktop/Aegis School of Business/Python/Evaluation/flask_pro/static/course_ana.png")

		course_plot_pie = plot([Pie(values = sizes, labels = labels)], output_type='div')

		# Word Cloud Positive
		comment_words = ' '
		stopwords = set(STOPWORDS)
		for val in positive_reviews_lst:
			val = str(val)
			# split the value
			tokens = val.split()
			# Converts each token into lowercase
			for i in range(len(tokens)):
				tokens[i] = tokens[i].lower()
			for words in tokens:
				comment_words = comment_words + words + ' '
		wordcloud = WordCloud(width = 800, height = 800,background_color ='white',stopwords = stopwords,min_font_size = 10).generate(comment_words)
		plt.figure(figsize = (5, 5), facecolor = None)
		plt.imshow(wordcloud)
		plt.axis("off")
		plt.tight_layout(pad = 0)
		plt.savefig("C:/Users/Aniket/Desktop/Aegis School of Business/Python/Evaluation/flask_pro/static/course_positive_cloud.png")

		# Word Cloud Negative
		if len(negative_reviews_lst) != 0:
			comment_words = ' '
			stopwords = set(STOPWORDS)
			for val in negative_reviews_lst:
				val = str(val)
				# split the value
				tokens = val.split()
				# Converts each token into lowercase
				for i in range(len(tokens)):
					tokens[i] = tokens[i].lower()
				for words in tokens:
					comment_words = comment_words + words + ' '
			wordcloud = WordCloud(width = 800, height = 800,background_color ='white',stopwords = stopwords,min_font_size = 10).generate(comment_words)
			plt.figure(figsize = (5, 5), facecolor = None)
			plt.imshow(wordcloud)
			plt.axis("off")
			plt.tight_layout(pad = 0)
			plt.savefig("C:/Users/Aniket/Desktop/Aegis School of Business/Python/Evaluation/flask_pro/static/course_negative_cloud.png")

		else:
			negative_reviews_lst = ['Zero Negative Reviews']
			comment_words = ' '
			stopwords = set(STOPWORDS)
			for val in negative_reviews_lst:
				val = str(val)
				# split the value
				tokens = val.split()
				# Converts each token into lowercase
				for i in range(len(tokens)):
					tokens[i] = tokens[i].lower()
				for words in tokens:
					comment_words = comment_words + words + ' '
			wordcloud = WordCloud(width = 800, height = 800,background_color ='white',stopwords = stopwords,min_font_size = 10).generate(comment_words)
			plt.figure(figsize = (5, 5), facecolor = None)
			plt.imshow(wordcloud)
			plt.axis("off")
			plt.tight_layout(pad = 0)
			plt.savefig("C:/Users/Aniket/Desktop/Aegis School of Business/Python/Evaluation/flask_pro/static/course_negative_cloud.png")

		return render_template("Course_Results.html", ASentiment = str(int(sentiment_polarity*100)) +"%",total = total_reviews, negative = negative_reviews, positive = positive_reviews, neutral = neutral_reviews, pie = Markup(course_plot_pie), Course_Name = course_input.title())
	except:
		return "Please type the exact name of the course"
Example #13
def File_Results():
	if request.method == 'POST':
		try:
			f = request.files['file']
			f.filename = "senti.csv"
			f.save("C:/Users/Aniket/Desktop/Aegis School of Business/Python/Evaluation/flask_pro/static/"+f.filename)

		
			df = pd.read_csv("C:/Users/Aniket/Desktop/Aegis School of Business/Python/Evaluation/flask_pro/static/senti.csv")
			polarity_list = []
			numbers_list = []
			number = 1
			polar = []
			df1 = list(df.iloc[:,0])
			for i in df1:
				analysis = TextBlob(i)
				fanalysis = analysis.sentiment
				polarity = fanalysis.polarity
				polarity_list.append(polarity)
				numbers_list.append(number)
				number = number + 1

			negative = 0
			positive = 0
			neutral = 0
			for i in polarity_list:
				if i < 0:
					polar.append("Negative")
					negative += 1
				elif i == 0:  # elif, so negative values are not also counted as positive
					polar.append("Neutral")
					neutral += 1
				else:
					polar.append("Positive")
					positive += 1
			total_text = negative + positive + neutral
			averagePolarity = (sum(polarity_list))/(len(polarity_list))
			averagePolarity = "{0:.0f}%".format(averagePolarity * 100)
			time  = datetime.now().strftime("At: %H:%M\nOn: %m-%d-%y")
			values = [negative,neutral,positive]

			my_plot_pie = plot([Pie(values = values, labels = ["Negative","Neutral","Positive"])], output_type='div')

			print("all ok 4")
			comment_words = ' '
			stopwords = set(STOPWORDS)
			df = pd.DataFrame({"text":df1,"polarity":polarity_list})
			a = df.loc[df["polarity"]<0]
			comment_words = ' '
			stopwords = set(STOPWORDS)

			for val in a["text"]:
				val = str(val)

	            # split the value
				tokens = val.split()

	            # Converts each token into lowercase
				for i in range(len(tokens)):
					tokens[i] = tokens[i].lower()
				for words in tokens:
					comment_words = comment_words + words + ' '

			wordcloud = WordCloud(width = 800, height = 800,background_color ='white',stopwords = stopwords,min_font_size = 10).generate(comment_words)
			plt.figure(figsize = (5, 5), facecolor = None)
			plt.imshow(wordcloud)
			plt.axis("off")
			plt.tight_layout(pad = 0)
			plt.savefig("C:/Users/Aniket/Desktop/Aegis School of Business/Python/Evaluation/flask_pro/static/file_negative_cloud.png")
	#######################################
			tfidf_vectorizer = TfidfVectorizer(min_df=1,stop_words="english")

			simi = []
			b = list(a["text"])
			p = list(a["polarity"])
			n = []
			for i in range(0,len(b)):
				n.append(i)

			for i in range(0,len(b)):

				subsimi = []
				for j in range(0,len(b)):

					train_set = [b[i],b[j]]
					tfidf_mat = tfidf_vectorizer.fit_transform(train_set)
					a = cosine_similarity(tfidf_mat[0:1],tfidf_mat)
					subsimi.append(a[0][1])

				simi.append(subsimi)
			dis = np.array(simi)
			dis = np.round(dis,2)
			diss = 1- dis
			similarity = diss
			graph = similarity.copy()

			model = AgglomerativeClustering(affinity="precomputed", n_clusters = 3, linkage="complete").fit(graph)
			print(model.labels_)
			clust = model.labels_

			my_plot_file = plot([Scatter(x=n, y=p,mode = "markers",text = b, marker = dict(size = 10, color = clust,colorscale='Viridis',showscale=True))], output_type='div')

			return render_template("File_Results.html", ASentiment = averagePolarity,total = total_text, negative = negative, positive = positive, neutral = neutral, pie = Markup(my_plot_pie),file = Markup(my_plot_file))
		except:
			return 'please upload a csv file'
Example #14
def aviso_grupos():
    return Markup('<a href="%s">Alguns fornecedores' % url_for('grupos') +
                  ' foram agrupados conforme' +
                  ' listado nessa outra página.</a>')
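The link text is Portuguese for "Some suppliers were grouped as listed on this other page." Because the %-interpolation happens on a plain string before Markup() wraps the result, nothing is escaped here; that is harmless for url_for() output, but a hedged alternative that keeps escaping for any interpolated value would be:

def aviso_grupos():
    # sketch only: Markup.format() escapes its arguments, unlike %-formatting a plain str
    return Markup('<a href="{url}">Alguns fornecedores foram agrupados '
                  'conforme listado nessa outra página.</a>').format(url=url_for('grupos'))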
Example #15
def upload():
    if not config.config_uploading:
        abort(404)
    if request.method == 'POST' and 'btn-upload' in request.files:
        for requested_file in request.files.getlist("btn-upload"):
            try:
                modif_date = False
                # create the function for sorting...
                calibre_db.update_title_sort(config)
                calibre_db.session.connection().connection.connection.create_function('uuid4', 0, lambda: str(uuid4()))

                # check if file extension is correct
                if '.' in requested_file.filename:
                    file_ext = requested_file.filename.rsplit('.', 1)[-1].lower()
                    if file_ext not in constants.EXTENSIONS_UPLOAD and '' not in constants.EXTENSIONS_UPLOAD:
                        flash(
                            _("File extension '%(ext)s' is not allowed to be uploaded to this server",
                              ext=file_ext), category="error")
                        return Response(json.dumps({"location": url_for("web.index")}), mimetype='application/json')
                else:
                    flash(_('File to be uploaded must have an extension'), category="error")
                    return Response(json.dumps({"location": url_for("web.index")}), mimetype='application/json')

                # extract metadata from file
                try:
                    meta = uploader.upload(requested_file, config.config_rarfile_location)
                except (IOError, OSError):
                    log.error("File %s could not saved to temp dir", requested_file.filename)
                    flash(_(u"File %(filename)s could not saved to temp dir",
                            filename= requested_file.filename), category="error")
                    return Response(json.dumps({"location": url_for("web.index")}), mimetype='application/json')
                title = meta.title
                authr = meta.author

                if title != _(u'Unknown') and authr != _(u'Unknown'):
                    entry = calibre_db.check_exists_book(authr, title)
                    if entry:
                        log.info("Uploaded book probably exists in library")
                        flash(_(u"Uploaded book probably exists in the library, consider to change before upload new: ")
                            + Markup(render_title_template('book_exists_flash.html', entry=entry)), category="warning")

                # handle authors
                input_authors = authr.split('&')
                # handle_authors(input_authors)
                input_authors = list(map(lambda it: it.strip().replace(',', '|'), input_authors))
                # Remove duplicates in authors list
                input_authors = helper.uniq(input_authors)

                # we have all author names now
                if input_authors == ['']:
                    input_authors = [_(u'Unknown')]  # prevent empty Author

                sort_authors_list=list()
                db_author = None
                for inp in input_authors:
                    stored_author = calibre_db.session.query(db.Authors).filter(db.Authors.name == inp).first()
                    if not stored_author:
                        if not db_author:
                            db_author = db.Authors(inp, helper.get_sorted_author(inp), "")
                            calibre_db.session.add(db_author)
                            calibre_db.session.commit()
                        sort_author = helper.get_sorted_author(inp)
                    else:
                        if not db_author:
                            db_author = stored_author
                        sort_author = stored_author.sort
                    sort_authors_list.append(sort_author)
                sort_authors = ' & '.join(sort_authors_list)

                title_dir = helper.get_valid_filename(title)
                author_dir = helper.get_valid_filename(db_author.name)

                # combine path and normalize path from windows systems
                path = os.path.join(author_dir, title_dir).replace('\\', '/')
                # Calibre adds books with utc as timezone
                db_book = db.Books(title, "", sort_authors, datetime.utcnow(), datetime(101, 1, 1),
                                   '1', datetime.utcnow(), path, meta.cover, db_author, [], "")

                modif_date |= modify_database_object(input_authors, db_book.authors, db.Authors, calibre_db.session,
                                                     'author')

                # Add series_index to book
                modif_date |= edit_book_series_index(meta.series_id, db_book)

                # add languages
                modif_date |= edit_book_languages(meta.languages, db_book, upload=True)

                # handle tags
                modif_date |= edit_book_tags(meta.tags, db_book)

                # handle series
                modif_date |= edit_book_series(meta.series, db_book)

                # Add file to book
                file_size = os.path.getsize(meta.file_path)
                db_data = db.Data(db_book, meta.extension.upper()[1:], file_size, title_dir)
                db_book.data.append(db_data)
                calibre_db.session.add(db_book)

                # flush content, get db_book.id available
                calibre_db.session.flush()

                # Comments need the book id, therefore only possible after flush
                modif_date |= edit_book_comments(Markup(meta.description).unescape(), db_book)

                book_id = db_book.id
                title = db_book.title

                error = helper.update_dir_structure_file(book_id,
                                                   config.config_calibre_dir,
                                                   input_authors[0],
                                                   meta.file_path,
                                                   title_dir + meta.extension)

                # move cover to final directory, including book id
                if meta.cover:
                    coverfile = meta.cover
                else:
                    coverfile = os.path.join(constants.STATIC_DIR, 'generic_cover.jpg')
                new_coverpath = os.path.join(config.config_calibre_dir, db_book.path, "cover.jpg")
                try:
                    copyfile(coverfile, new_coverpath)
                    if meta.cover:
                        os.unlink(meta.cover)
                except OSError as e:
                    log.error("Failed to move cover file %s: %s", new_coverpath, e)
                    flash(_(u"Failed to Move Cover File %(file)s: %(error)s", file=new_coverpath,
                            error=e),
                          category="error")

                # save data to database, reread data
                calibre_db.session.commit()

                if config.config_use_google_drive:
                    gdriveutils.updateGdriveCalibreFromLocal()
                if error:
                    flash(error, category="error")
                uploadText=_(u"File %(file)s uploaded", file=title)
                WorkerThread.add(current_user.nickname, TaskUpload(
                    "<a href=\"" + url_for('web.show_book', book_id=book_id) + "\">" + uploadText + "</a>"))

                if len(request.files.getlist("btn-upload")) < 2:
                    if current_user.role_edit() or current_user.role_admin():
                        resp = {"location": url_for('editbook.edit_book', book_id=book_id)}
                        return Response(json.dumps(resp), mimetype='application/json')
                    else:
                        resp = {"location": url_for('web.show_book', book_id=book_id)}
                        return Response(json.dumps(resp), mimetype='application/json')
            except OperationalError as e:
                calibre_db.session.rollback()
                log.error("Database error: %s", e)
                flash(_(u"Database error: %(error)s.", error=e), category="error")
        return Response(json.dumps({"location": url_for("web.index")}), mimetype='application/json')
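The Markup(meta.description).unescape() call in this example turns HTML entities in the uploaded metadata back into plain characters before the comment is stored. A minimal illustration:

from markupsafe import Markup

Markup('Tom &amp; Jerry &lt;3').unescape()  # -> 'Tom & Jerry <3' (a plain str)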
Example #16
 def datasource_link(self):
     url = "/superset/explore/{obj.type}/{obj.id}/".format(obj=self)
     name = escape(self.datasource_name)
     return Markup('<a href="{url}">{name}</a>'.format(**locals()))
Example #17
class DruidDatasourceModelView(  # pylint: disable=too-many-ancestors
        EnsureEnabledMixin,
        DatasourceModelView,
        DeleteMixin,
        YamlExportMixin,
):
    datamodel = SQLAInterface(models.DruidDatasource)
    include_route_methods = RouteMethod.CRUD_SET
    list_title = _("Druid Datasources")
    show_title = _("Show Druid Datasource")
    add_title = _("Add Druid Datasource")
    edit_title = _("Edit Druid Datasource")

    list_columns = ["datasource_link", "cluster", "changed_by_", "modified"]
    order_columns = ["datasource_link", "modified"]
    related_views = [DruidColumnInlineView, DruidMetricInlineView]
    edit_columns = [
        "datasource_name",
        "cluster",
        "description",
        "owners",
        "is_hidden",
        "filter_select_enabled",
        "fetch_values_from",
        "default_endpoint",
        "offset",
        "cache_timeout",
    ]
    search_columns = ("datasource_name", "cluster", "description", "owners")
    add_columns = edit_columns
    show_columns = add_columns + ["perm", "slices"]
    page_size = 500
    base_order = ("datasource_name", "asc")
    description_columns = {
        "slices":
        _("The list of charts associated with this table. By "
          "altering this datasource, you may change how these associated "
          "charts behave. "
          "Also note that charts need to point to a datasource, so "
          "this form will fail at saving if removing charts from a "
          "datasource. If you want to change the datasource for a chart, "
          "overwrite the chart from the 'explore view'"),
        "offset":
        _("Timezone offset (in hours) for this datasource"),
        "description":
        Markup('Supports <a href="'
               'https://daringfireball.net/projects/markdown/">markdown</a>'),
        "fetch_values_from":
        _("Time expression to use as a predicate when retrieving "
          "distinct values to populate the filter component. "
          "Only applies when `Enable Filter Select` is on. If "
          "you enter `7 days ago`, the distinct list of values in "
          "the filter will be populated based on the distinct value over "
          "the past week"),
        "filter_select_enabled":
        _("Whether to populate the filter's dropdown in the explore "
          "view's filter section with a list of distinct values fetched "
          "from the backend on the fly"),
        "default_endpoint":
        _("Redirects to this endpoint when clicking on the datasource "
          "from the datasource list"),
        "cache_timeout":
        _("Duration (in seconds) of the caching timeout for this datasource. "
          "A timeout of 0 indicates that the cache never expires. "
          "Note this defaults to the cluster timeout if undefined."),
    }
    base_filters = [["id", DatasourceFilter, lambda: []]]
    label_columns = {
        "slices": _("Associated Charts"),
        "datasource_link": _("Data Source"),
        "cluster": _("Cluster"),
        "description": _("Description"),
        "owners": _("Owners"),
        "is_hidden": _("Is Hidden"),
        "filter_select_enabled": _("Enable Filter Select"),
        "default_endpoint": _("Default Endpoint"),
        "offset": _("Time Offset"),
        "cache_timeout": _("Cache Timeout"),
        "datasource_name": _("Datasource Name"),
        "fetch_values_from": _("Fetch Values From"),
        "changed_by_": _("Changed By"),
        "modified": _("Modified"),
    }
    edit_form_extra_fields = {
        "cluster":
        QuerySelectField(
            "Cluster",
            query_factory=lambda: db.session.query(models.DruidCluster),
            widget=Select2Widget(extra_classes="readonly"),
        ),
        "datasource_name":
        StringField("Datasource Name", widget=BS3TextFieldROWidget()),
    }

    def pre_add(self, item: "DruidDatasourceModelView") -> None:
        with db.session.no_autoflush:
            query = db.session.query(models.DruidDatasource).filter(
                models.DruidDatasource.datasource_name == item.datasource_name,
                models.DruidDatasource.cluster_id == item.cluster_id,
            )
            if db.session.query(query.exists()).scalar():
                raise Exception(get_dataset_exist_error_msg(item.full_name))

    def post_add(self, item: "DruidDatasourceModelView") -> None:
        item.refresh_metrics()
        security_manager.add_permission_view_menu("datasource_access",
                                                  item.get_perm())
        if item.schema:
            security_manager.add_permission_view_menu("schema_access",
                                                      item.schema_perm)

    def post_update(self, item: "DruidDatasourceModelView") -> None:
        self.post_add(item)

    def _delete(self, pk: int) -> None:
        DeleteMixin._delete(self, pk)
Example #18
from flask import Markup  # Markup is used by MK_SANITIZER below
from flask.views import MethodView
from flask.json import JSONEncoder
from mistune import markdown
from .constants import OPTIONAL_FIELDS
from .utils import extract_definitions
from .utils import get_specs
from .utils import get_schema_specs
from .utils import parse_definition_docstring
from .utils import get_vendor_extension_fields
from .utils import validate
from .utils import LazyString
from . import __version__

NO_SANITIZER = lambda text: text  # noqa
BR_SANITIZER = lambda text: text.replace('\n', '<br/>') if text else text  # noqa
MK_SANITIZER = lambda text: Markup(markdown(text)) if text else text  # noqa


class APIDocsView(MethodView):
    """
    The /apidocs
    """
    def __init__(self, *args, **kwargs):
        view_args = kwargs.pop('view_args', {})
        self.config = view_args.get('config')
        super(APIDocsView, self).__init__(*args, **kwargs)

    def get(self):
        """
        The data under /apidocs
        json or Swagger UI
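The sanitizers defined at the top of this example are intended to be handed to flasgger's Swagger constructor; MK_SANITIZER renders docstring descriptions as Markdown wrapped in Markup. A usage sketch (the app object is assumed):

from flask import Flask
from flasgger import Swagger

app = Flask(__name__)
swagger = Swagger(app, sanitizer=MK_SANITIZER)  # render API descriptions as Markdown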
Example #19
 def created(self):
     #date = self.created_on
     #return date.strftime('We are the %d, %b %Y')
     #return Markup(_(momentjs(self.created_on).calendar() + ' | ' + momentjs(self.created_on).fromNow()))
     return Markup(momentjs(self.created_on).format('D MMM Y | LT'))
Example #20
 def clean_username(self):
     self.username = Markup(self.username).striptags()
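striptags() removes tags, unescapes entities, and normalizes whitespace, leaving plain text. A quick illustration:

from markupsafe import Markup

Markup('<b>alice</b> &amp; bob').striptags()  # -> 'alice & bob'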
Example #21
def about():
    name = "about"
    content = Markup(markdown.markdown(open(settings.APP_PATH + "/about/about.md").read()))
    return render_template("about/about.html", **locals())
Example #22
 def clean_about(self):
     # swap <br> for a sentinel, strip all tags, then restore newlines
     self.about = (Markup(self.about.replace('<br>', '\^n^'))
                   .striptags()
                   .replace('\^n^', '\n'))
Example #23
from flask import Markup
from common.logger import logger
from random import shuffle
from jinja2 import Environment, PackageLoader
import common.turk_utils as turk_utils
import models.Response as Response
from models.Message import RatedMessage, GeneratedMessage, FixedMessage
from common.dbclient import db
from common import helpers
import ipdb
import json

MIN_RATING_TASK_TIME = 10  # seconds

env = Environment(loader=PackageLoader('ken', 'templates/experts_bakeoff'))
env.filters['escapejs'] = lambda v: Markup(json.dumps(v))
# templates/3x2 for varying roles_goals, tips experiment
# templates/experts_bakeoff for varying different personas

logr = logger(fname='pcbc-mturk-%s' % __name__)

personas = {
    'Charles': {
        'photo_url':
        'https://intecolab.com:5000/public/Personas/charles.jpg',
        'roles_goals': [
            "He is 48 years old",
            "He is married with two kids",
            "Being a good father and a husband is really important to him",
            "He would like to feel more confident about his appearance",
        ]
Example #24
def musical(s):
    return s + Markup(' &#9835;')
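Because Markup defines __radd__, concatenating a plain string with the Markup suffix escapes the plain part and returns Markup, so this filter stays safe on untrusted input. A sketch of the resulting behavior:

musical('Tom & Jerry')
# -> Markup('Tom &amp; Jerry &#9835;'); the plain-str side is escaped automatically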
Example #25
 def html(self):
     return Markup(self.render('googlemaps/gmap.html', gmap=self))
Example #26
    assert unlink_govuk_escaped(template_content) == expected
    assert str(
        PlainTextEmailTemplate({
            'content': template_content,
            'subject': ''
        })) == expected
    assert expected in str(
        HTMLEmailTemplate({
            'content': template_content,
            'subject': ''
        }))


@mock.patch('notifications_utils.template.add_prefix', return_value='')
@pytest.mark.parametrize("template_class, prefix, body, expected_call", [
    (SMSMessageTemplate, "a", "b", (Markup("b"), "a")),
    (SMSPreviewTemplate, "a", "b", (Markup("b"), "a")),
    (SMSMessageTemplate, None, "b", (Markup("b"), None)),
    (SMSPreviewTemplate, None, "b", (Markup("b"), None)),
    (SMSMessageTemplate, '<em>ht&ml</em>', "b",
     (Markup("b"), '<em>ht&ml</em>')),
    (SMSPreviewTemplate, '<em>ht&ml</em>', "b",
     (Markup("b"), '&lt;em&gt;ht&amp;ml&lt;/em&gt;')),
])
def test_sms_message_adds_prefix(add_prefix, template_class, prefix, body,
                                 expected_call):
    template = template_class({'content': body})
    template.prefix = prefix
    template.sender = None
    str(template)
    add_prefix.assert_called_once_with(*expected_call)
Example #27
        visit.site_id = form.site.data.id
        db.session.add(visit)
        db.session.commit()
        flash("Added visit for site {}".format(form.site.data.base_url))
        return redirect(url_for(".index"))

    return render_template("validation_error.html", form=form)


@flask.route("/sites")
def view_sites():
    query = Site.query.filter(Site.id >= 0)
    data = query_to_list(query)

    # The header row should not be linked
    results = [next(data)]
    for row in data:
        row = [_make_link(cell) if i == 0 else cell
               for i, cell in enumerate(row)]
        results.append(row)

    return render_template("data_list.html", data=results, title="Sites")


_LINK = Markup('<a href="{url}">{name}</a>')


def _make_link(site_id):
    url = url_for(".view_site_visits", site_id=site_id)
    return _LINK.format(url=url, name=site_id)
Example #28
def test_subject_line_gets_replaced():
    template = WithSubjectTemplate({"content": '', 'subject': '((name))'})
    assert template.subject == Markup(
        "<span class='placeholder'>((name))</span>")
    template.values = {'name': 'Jo'}
    assert template.subject == 'Jo'
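Markup subclasses str, which is why the test can compare the rendered subject directly against plain strings:

from markupsafe import Markup

assert Markup('<b>x</b>') == '<b>x</b>'  # equality is by text, not by safety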
Example #29
 def render_gfm(s):
     output = markdown.markdown(s, extensions=['gfm'])
     moutput = Markup(output)
     return moutput
Example #30
 def js(self):
     return Markup(
         self.render('googlemaps/gmapjs.html',
                     gmap=self,
                     DEFAULT_ICON=DEFAULT_ICON))