Code example #1
File: svm_bench.py  Project: ZENGXH/kaggle
# svm_bench.py
import logging
import my_io
import classification_baseline
from sklearn import svm
import numpy as np
from sklearn.metrics import zero_one_loss
from numpy import linalg as LA

my_io.setUp('./biological_response/')

my_io.startLog(__name__)
logger = logging.getLogger(__name__)

y,X,trainData,testData = my_io.readCsv()
portion = 0.2
seed = 1
X_test, X_train, y_train, y_test = classification_baseline.splitData(X,y,portion,seed)

logger.info('init svm classifier')
svc = svm.SVC(probability = True)
logger.info('fitting svc')
svc.fit(X_train, y_train)
logger.info('start predict')
predict_probs = svc.predict_proba(X_test)

# threshold the predicted probabilities to hard 0/1 labels
predict = my_io.toZeroOne(predict_probs)
# error = zero_one_loss(y_test, predict)
loss = np.subtract(predict, y_test)

error = LA.norm(loss)
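
For reference, the commented-out zero_one_loss check is the more standard way to score the held-out split. A minimal sketch, assuming my_io.toZeroOne yields a 0/1 label array aligned with y_test (the helper's exact semantics are not shown in this snippet):

from sklearn.metrics import zero_one_loss, accuracy_score

# fraction of misclassified validation samples
misclassification_rate = zero_one_loss(y_test, predict)
accuracy = accuracy_score(y_test, predict)
# For 0/1 vectors, LA.norm(predict - y_test) equals sqrt(number of mistakes),
# so the norm-based `error` above ranks models the same way as zero_one_loss.
logger.info('zero-one loss: %f, accuracy: %f', misclassification_rate, accuracy)
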
Code example #2
File: test_my_io.py  Project: ZENGXH/kaggle
# test my_io.py

import my_io
import numpy as np
import logging

# test setUp
my_io.setUp('digit/')
my_io.startLog(__name__)
logger = logging.getLogger(__name__)

"""# test read data
my_io.readCsv()
a = [1,2,3]
my_io.writeCsv(a)
my_io.startLog(__name__)
logger = logging.getLogger(__name__)
"""

a = [['1','2.2','3.3'],['3.1','2.3','2']]
b = a[0]

print(my_io.toFloat(a))
print(my_io.toZeroOne(b))

logger.info('pass test :) ') 
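
The test above only exercises two conversion helpers. A minimal sketch of hypothetical implementations, assuming toFloat parses numeric strings into a float array and toZeroOne thresholds values into hard 0/1 labels (the real versions in my_io.py may differ):

import numpy as np

def toFloat(rows):
    # e.g. [['1', '2.2', '3.3'], ['3.1', '2.3', '2']] -> float ndarray
    return np.array(rows, dtype=float)

def toZeroOne(values):
    # threshold numeric (or numeric-string) values at 0.5 into 0/1 labels
    return (np.asarray(values, dtype=float) >= 0.5).astype(int)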

Code example #3
File: rf_bench.py  Project: ZENGXH/kaggle
# rf_bench.py
import logging
import my_io
import classification_baseline
from sklearn import svm
import numpy as np
#from sklearn.metrics import classification
from numpy import linalg as LA

my_io.setUp('./digit/')

my_io.startLog(__name__)
logger = logging.getLogger(__name__)

"""y,X,trainData,testData = my_io.readCsv()
portion = 0.2
seed = 1
X_test, X_train, y_train, y_test = classification_baseline.splitData(X,y,portion,seed)

logger.info('init svm classifier')
svc = svm.SVC(probability = True)
logger.info('fitting svc')
svc.fit(X_train, y_train)
logger.info('start predict')
predict_probs = svc.predict_proba(X_test)

predict = my_io.toZeroOne(predict_probs)
# error = classification.zero_one_loss(y_test, predict)
loss = np.subtract(predict,y_test)

error = LA.norm(loss)
"""
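
The file name suggests the commented-out SVM block is a placeholder to be swapped for a random-forest pipeline. A minimal sketch under that assumption, reusing the imports and helpers already at the top of rf_bench.py (the classifier choice and its parameters are not taken from the repository):

from sklearn.ensemble import RandomForestClassifier

y, X, trainData, testData = my_io.readCsv()
X_test, X_train, y_train, y_test = classification_baseline.splitData(X, y, 0.2, 1)

logger.info('init random forest classifier')
rf = RandomForestClassifier(n_estimators=100, random_state=1)
logger.info('fitting random forest')
rf.fit(X_train, y_train)
logger.info('start predict')
predict = rf.predict(X_test)

# same norm-based error as in svm_bench.py
error = LA.norm(np.subtract(predict, y_test))
logger.info('benchmark error: %f', error)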