# ---- Beispiel #1 (example separator) ----
	def pr_curve_plot(self, test_array, prediction_prob, title, categories=None):
		"""Plot per-class and micro-averaged precision-recall curves.

		Parameters
		----------
		test_array : ndarray of shape (n_samples, n_classes)
			Binary indicator matrix of true labels.
		prediction_prob : ndarray of shape (n_samples, n_classes)
			Predicted per-class probabilities/scores.
		title : str
			Prefix for the saved figure file name.
		categories : sequence, optional
			Class names; falls back to ``self.categories`` when empty/None.
			(Default changed from a mutable ``[]`` — behavior is identical.)

		Saves the figure to ``self.results + title + 'pr.png'``.
		"""
		if not categories:
			categories = self.categories
		# Fix: plt.style is a module, not a callable — the original
		# plt.style('ggplot') raised TypeError. Apply the style *before*
		# creating the figure so it actually takes effect.
		plt.style.use('ggplot')
		fig = plt.figure(figsize=(10, 7))
		ax1 = fig.add_subplot(111)
		precision = {}
		recall = {}
		average_precision = {}
		# One PR curve and average-precision score per class.
		for i in range(len(categories)):
			precision[i], recall[i], _ = precision_recall_curve(
				test_array[:, i], prediction_prob[:, i])
			average_precision[i] = average_precision_score(
				test_array[:, i], prediction_prob[:, i])
		# Micro-average: pool every (sample, class) decision into one curve.
		precision["micro"], recall["micro"], _ = precision_recall_curve(
			test_array.ravel(), prediction_prob.ravel())
		average_precision["micro"] = average_precision_score(
			test_array, prediction_prob, average="micro")
		plt.plot(recall["micro"], precision["micro"], lw=6,
				 label='Average Precision-recall curve (area = {0:0.2f})'
					   ''.format(average_precision["micro"]))
		for i in range(len(categories)):
			plt.plot(recall[i], precision[i], '--', lw=4,
					 label='Precision-recall curve of class {0} (area = {1:0.2f})'
						   ''.format(categories[i], average_precision[i]))
		plt.xlim([0.0, 1.0])
		plt.ylim([0.0, 1.05])
		plt.xlabel('Recall', fontsize=18)
		plt.ylabel('Precision', fontsize=18)
		plt.legend(bbox_to_anchor=(1.1, 1.05), fontsize=14)
		plt.title('Precision vs Recall', fontsize=18)
		ax1.tick_params(axis='both', which='major', labelsize=16)
		fig.savefig(self.results + title + 'pr.png', dpi=fig.dpi, bbox_inches='tight')
		plt.close()
# ---- Beispiel #2 (example separator) ----
	def roc_curve_plot(self, test_array, prediction_prob, title, categories=None):
		"""Plot per-class and micro-averaged ROC curves.

		Parameters
		----------
		test_array : ndarray of shape (n_samples, n_classes)
			Binary indicator matrix of true labels.
		prediction_prob : ndarray of shape (n_samples, n_classes)
			Predicted per-class probabilities/scores.
		title : str
			Prefix for the saved figure file name.
		categories : sequence, optional
			Class names; falls back to ``self.categories`` when empty/None.
			(Default changed from a mutable ``[]`` — behavior is identical.)

		Saves the figure to ``self.results + title + 'roc.png'``.
		"""
		if not categories:
			categories = self.categories
		# Fix: plt.style is a module, not a callable — the original
		# plt.style('ggplot') raised TypeError. Apply the style *before*
		# creating the figure so it actually takes effect.
		plt.style.use('ggplot')
		fig = plt.figure(figsize=(10, 7))
		ax1 = fig.add_subplot(111)
		fpr = {}
		tpr = {}
		average_roc_auc = {}
		# One ROC curve and AUC score per class. (The original also filled a
		# `roc_auc` dict via auc(fpr, tpr) but never read it — removed.)
		for i in range(len(categories)):
			fpr[i], tpr[i], _ = roc_curve(test_array[:, i], prediction_prob[:, i])
			average_roc_auc[i] = roc_auc_score(
				test_array[:, i], prediction_prob[:, i])
		# Micro-average: pool every (sample, class) decision into one curve.
		fpr["micro"], tpr["micro"], _ = roc_curve(
			test_array.ravel(), prediction_prob.ravel())
		average_roc_auc["micro"] = roc_auc_score(
			test_array, prediction_prob, average="micro")
		plt.plot(fpr["micro"], tpr["micro"], lw=6,
				 label='Average ROC curve (area = {0:0.2f})'
					   ''.format(average_roc_auc["micro"]))
		for i in range(len(categories)):
			plt.plot(fpr[i], tpr[i], '--', lw=4,
				 label='ROC curve of class {0} (area = {1:0.2f})'
					   ''.format(categories[i], average_roc_auc[i]))
		plt.xlim([0.0, 1.0])
		plt.ylim([0.0, 1.05])
		plt.xlabel('False Positive Rate', fontsize=18)
		plt.ylabel('True Positive Rate', fontsize=18)
		plt.legend(bbox_to_anchor=(1.1, 1.05), fontsize=14)
		ax1.tick_params(axis='both', which='major', labelsize=16)
		plt.title('Receiver operating characteristics', fontsize=18)
		fig.savefig(self.results + title + 'roc.png', dpi=fig.dpi, bbox_inches='tight')
		plt.close()
# Evaluate the trained network on the held-out test set.
print("[INFO] Evaluating network...")
preIdxs = model.predict(testX, batch_size=BS)
print(preIdxs)

# Collapse per-class probabilities to the index of the most likely class.
preIdxs = np.argmax(preIdxs, axis=1)

# Per-class precision/recall/F1 report against the one-hot ground truth.
print(
    classification_report(testY.argmax(axis=1),
                          preIdxs,
                          target_names=lb.classes_))

# Serialize the trained model to disk in HDF5 format.
print("[INFO] Saving mask detector model...")
model.save(args["model"], save_format="h5")

# Plot training/validation loss and accuracy over the epochs.
N = EPOCHS
# Fix: plt.style is a module, not a callable — the original
# plt.style("ggplot") raised TypeError; use plt.style.use instead.
plt.style.use("ggplot")
plt.figure()
plt.plot(np.arange(0, N), H.history["loss"], label="train_loss")
plt.plot(np.arange(0, N), H.history["val_loss"], label="val_loss")
plt.plot(np.arange(0, N), H.history["accuracy"], label="train_acc")
plt.plot(np.arange(0, N), H.history["val_accuracy"], label="val_acc")
plt.title("Training Loss and Accuracy")
plt.xlabel("Epoch #")
plt.ylabel("Loss/Accuracy")
plt.legend(loc="lower left")
plt.savefig(args["plot"])
# ---- Beispiel #4 (example separator) ----
# -*- coding: utf-8 -*-
"""
Created on Wed Jan 22 02:59:43 2020

@author: Regaip
"""

import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt

# Read central-government debt payments; first column is the month/year index.
veri = pd.read_csv("Borçlar.csv", sep=";", index_col=0)

# Yearly slices (12 rows per year). NOTE(review): these are not used below —
# presumably kept for interactive exploration; confirm before removing.
veri_2017 = veri.iloc[0:12, ]
veri_2018 = veri.iloc[12:24, ]
veri_2019 = veri.iloc[24:, ]

# Fix: "whitegrid" is a seaborn style name and plt.style is a module, not a
# callable — the original plt.style("whitegrid") raised TypeError. seaborn is
# already imported, so apply the style the seaborn way.
sns.set_style("whitegrid")
plt.figure(figsize=(15, 10))
# Overlapping bars: Toplam (total) drawn first, Anapara and Faiz on top.
plt.bar(veri.index, veri["Toplam"], label="Toplam")
plt.bar(veri.index, veri["Anapara"], label="Anapara")
plt.bar(veri.index, veri["Faiz"], label="Faiz")
plt.legend()
plt.xticks(veri.index, rotation='vertical')
plt.title("Merkezi Yönetim Dış Borç Ödemeleri", fontdict={"size": 20})
plt.xlabel("Aylar ve Yıllar", fontdict={"size": 15})
plt.ylabel("Ödeme Miktarı \nMilyon $", fontdict={"size": 15})

# Tweak spacing to prevent clipping of tick-labels.
plt.subplots_adjust(bottom=0.1)
plt.show()
# ---- Beispiel #5 (example separator) ----
import numpy as np
from sklearn import linear_model
from sklearn.svm import SVR
from sklearn.model_selection import train_test_split
import pandas as pd
import matplotlib.pyplot as plt

# Fix: plt.style is a module, not a callable — plt.style('ggplot') raised
# TypeError; plt.style.use is the documented way to apply a style sheet.
plt.style.use('ggplot')


def linear_mode(train_path="/Users/senora/Desktop/train.csv",
                test_path="/Users/senora/Desktop/test.csv"):
    """Fit an ordinary least-squares model of crop YIELD on drought features.

    Parameters
    ----------
    train_path, test_path : str, optional
        CSV files containing the four drought feature columns and a 'YIELD'
        column. Defaults preserve the original hard-coded locations, so
        existing no-argument calls behave identically.

    Returns
    -------
    tuple
        ``(ols, model)`` — both are the fitted LinearRegression estimator,
        since sklearn's ``fit`` returns the estimator itself; the two-element
        return is kept for backward compatibility with existing callers.
    """
    data_train = pd.read_csv(train_path)
    data_test = pd.read_csv(test_path)

    # Same feature set for train and test; factored out of the original's
    # duplicated column lists.
    feature_cols = [
        'METEOROLOGICAL DROUGHT', 'HYDROLOGICAL DROUGHT',
        'AGRICULTURAL DROUGHT', 'AREA UNDER CULTIVATION'
    ]
    x_train = data_train[feature_cols]
    y_train = data_train[['YIELD']]
    x_test = data_test[feature_cols]
    y_test = data_test[['YIELD']]

    ols = linear_model.LinearRegression()
    model = ols.fit(x_train, y_train)
    # R^2 on the held-out test set. NOTE(review): computed but never returned
    # or logged (as in the original) — consider returning it to callers.
    accuracy = ols.score(x_test, y_test)

    return ols, model
# ---- Beispiel #6 (example separator) ----
# Autoencoder with Pytroch using fashion MNIST dataset.

import torch
import torch.nn as nn
import torchvision.transforms as transforms
import torchvision.datasets as dsets
import matplotlib.pyplot as plt
import numpy

figsize = (15, 6)
# Fix: plt.style is a module, not a callable — the original
# plt.style('fivethirtyeight') raised TypeError; use plt.style.use.
plt.style.use('fivethirtyeight')

# 1. Load Dataset
# Fix: torchvision datasets take a `transform` keyword (singular); the
# original `transforms=` raised TypeError on construction.

train_dataset = dsets.FashionMNIST(root='./data',
                                   train=True,
                                   transform=transforms.ToTensor(),
                                   download=True)

test_dataset = dsets.FashionMNIST(root='./data',
                                  train=False,
                                  transform=transforms.ToTensor(),
                                  download=True)

# 2. Data Loader

batch_size = 100
n_iters = 5000
# Number of full passes needed to cover n_iters mini-batches of batch_size.
num_epochs = n_iters / (len(train_dataset) / batch_size)
num_epochs = int(num_epochs)