def getTrainData(chunk, nb_classes, img_rows, img_cols):
    X_train, Y_train = get_train_data(chunk, img_rows, img_cols)
    if X_train is not None and Y_train is not None:
        # Scale pixel values from [0, 255] into [0, 1]
        X_train /= 255
        # X_train=X_train-np.average(X_train)
        # Y_train=np_utils.to_categorical(Y_train,nb_classes)
    return (X_train,Y_train)
def train(N=806148):
    cnt = 0          # batches processed since the last progress check
    phase = 0        # total number of samples seen so far
    limit = 1000     # check cost / report progress every `limit` batches
    alpha = 0.001    # learning rate
    ambit = 0.00001  # convergence threshold on the cost
    costf_hist, costc_hist, costl_hist = [], [], []
    thetaf, thetac, thetal = np.zeros((N - 3, 1)), np.zeros((N - 3, 1)), np.zeros((N - 3, 1))

    for data in get_train_data():
        X, Yf, Yc, Yl = [], [], [], []
        for i in data:
            X.append( i[:-3] )
            Yf.append( i[-3] )
            Yc.append( i[-2] )
            Yl.append( i[-1] )
        thetaf = stochastic_gradient_descent(np.mat(X), np.mat(Yf).T, thetaf, alpha)
        thetac = stochastic_gradient_descent(np.mat(X), np.mat(Yc).T, thetac, alpha)
        thetal = stochastic_gradient_descent(np.mat(X), np.mat(Yl).T, thetal, alpha)

        cnt += 1
        if cnt == limit:
            cnt = 0
            phase += 1000 * BATCH  # BATCH: batch-size constant defined outside this excerpt
            print('Trained on {} samples ({:.2%} of the total).'.format(phase, phase / N))
            costf = cost_function(np.mat(X), np.mat(Yf).T, thetaf)
            costc = cost_function(np.mat(X), np.mat(Yc).T, thetac)
            costl = cost_function(np.mat(X), np.mat(Yl).T, thetal)
            if costf < ambit and costc < ambit and costl < ambit:
                break
            costf_hist.append(costf)
            costc_hist.append(costc)
            costl_hist.append(costl)
    return thetaf, thetac, thetal, costf_hist, costc_hist, costl_hist
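The loop above relies on stochastic_gradient_descent and cost_function, which are not included in this excerpt. A minimal sketch of plausible implementations, assuming an ordinary least-squares linear model on np.mat inputs; the project's actual helpers may differ:

import numpy as np

def stochastic_gradient_descent(X, y, theta, alpha):
    # One pass over the batch: for each sample, take a gradient step on the
    # squared error of the linear prediction X[i] * theta.
    for i in range(X.shape[0]):
        xi = X[i]                      # 1 x n row matrix
        error = xi * theta - y[i]      # 1 x 1 residual
        theta = theta - alpha * xi.T * error
    return theta

def cost_function(X, y, theta):
    # Mean squared error of the linear predictions over the batch.
    residual = X * theta - y
    return (residual.T * residual)[0, 0] / (2 * X.shape[0])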
def getTrainData(chunk, nb_classes, img_rows, img_cols):
    X_train, Y_train = get_train_data(chunk, img_rows, img_cols)
    if X_train is not None and Y_train is not None:
        # Scale pixels into [0, 1] and one-hot encode the integer labels
        X_train /= 255
        Y_train = list(map(int, Y_train))
        Y_train = np_utils.to_categorical(Y_train, nb_classes)
    return (X_train, Y_train)
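A minimal usage sketch for getTrainData; chunks, the model, and the dimensions below are placeholders, not names from the source project:

# Hypothetical driver loop: iterate over data chunks and train on each batch.
nb_classes, img_rows, img_cols = 10, 224, 224
for chunk in chunks:
    X_train, Y_train = getTrainData(chunk, nb_classes, img_rows, img_cols)
    if X_train is None or Y_train is None:
        continue  # skip chunks that could not be loaded
    model.train_on_batch(X_train, Y_train)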
Example #5
def svm(X=None, y=None, X_test=None, y_test=None, i_time=10):
    '''
    Description: SVM classifier.
    Parameters:
        i_time: number of models whose performance is averaged
        X, y, X_test, y_test: passing these in avoids reloading the data and speeds things up
    '''
    if X is None or y is None or X_test is None or y_test is None:
        one_hot_key = read_config['svm']['one_hot_key']
        standard_scaler = True
        X, y = read_data.get_train_data(one_hot_key=one_hot_key,
                                        standard_scaler=standard_scaler)
        X_test, y_test = read_data.get_test_data(
            one_hot_key=one_hot_key, standard_scaler=standard_scaler)
        '''
        del_index = np.append(np.where(y==2),np.where(y==3))
        X = np.delete(X,del_index,axis=0)
        y = np.delete(y,del_index,axis=0)

        del_index_test = np.append(np.where(y_test==2),np.where(y_test==3))
        X_test = np.delete(X_test,del_index_test,axis=0)
        y_test = np.delete(y_test,del_index_test,axis=0)
        '''
        X1 = np.copy(X)
        y1 = np.copy(y)
        X_test1 = np.copy(X_test)
        y_test1 = np.copy(y_test)

        y[y == 2] = 1
        y[y == 3] = 1
        y_test[y_test == 2] = 1
        y_test[y_test == 3] = 1
    hyper_paras = get_section('svm')

    for i in np.linspace(3, 13, 250):
        #print(type(hyper_paras['kernel']),type(hyper_paras['degree']),type(hyper_paras['gamma']),type(hyper_paras['shrinking']),type(hyper_paras['c']))
        # Key point: handle class imbalance by sweeping the positive-class weight

        #svm_clf = SVC(kernel=hyper_paras['kernel'],class_weight='balanced')
        svm_clf = SVC(class_weight={0: 1, 1: i})
        '''
        svm_clf = SVC(kernel=hyper_paras['kernel'],
                      degree=hyper_paras['degree'],
                      gamma=hyper_paras['gamma'],
                      shrinking=eval(hyper_paras['shrinking']),
                      C=hyper_paras['c'],
                      class_weight=hyper_paras['class_weight'])
        '''
        start = time.perf_counter()  # time.clock() was removed in Python 3.8
        svm_clf.fit(X, y)
        end = time.perf_counter()
        train_time = end - start

        result = evaluate.calc_all(svm_clf, X_test, y_test, train_time)
        tools.dict_div(result, i_time)
        print(result)
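A hedged usage sketch; read_data, read_config, get_section, evaluate, and tools are project-specific modules not shown here, and the one_hot_key/standard_scaler values below are illustrative assumptions:

# Either let svm() load the data itself through read_data ...
svm()

# ... or pass preloaded arrays so repeated runs skip the loading step.
# Note: when data is passed in, the label remapping (2/3 -> 1) inside svm() is skipped.
X, y = read_data.get_train_data(one_hot_key=False, standard_scaler=True)
X_test, y_test = read_data.get_test_data(one_hot_key=False, standard_scaler=True)
svm(X=X, y=y, X_test=X_test, y_test=y_test, i_time=10)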
Example #6
from read_data import get_train_data, get_test_data, visualize_points
from keras.models import Sequential
from keras.layers import (Conv2D, MaxPooling2D, Flatten, Dense, Dropout, Input)
from keras.callbacks import ModelCheckpoint
from keras.optimizers import SGD, Adam
from skimage.io import imshow
from os.path import join
import glob
from skimage.io import imread
from skimage.transform import resize
from skimage.color import rgb2gray
import matplotlib.pyplot as plt
from matplotlib.patches import Circle

# Get the preprocessed train and test data
imgs_train, points_train = get_train_data()
imgs_test = get_test_data()
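The architecture defined below expects 96x96 single-channel inputs. A quick sanity check on the loaded arrays, assuming get_train_data() returns NumPy arrays (read_data is not shown in this excerpt):

# Images should be (n_samples, 96, 96, 1) to match the model's input_shape below.
assert imgs_train.ndim == 4 and imgs_train.shape[1:] == (96, 96, 1), imgs_train.shape
assert points_train.shape[0] == imgs_train.shape[0]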


# Define the architecture
def get_model():
    model = Sequential()
    model.add(
        Conv2D(64,
               kernel_size=3,
               strides=2,
               padding='same',
               input_shape=(96, 96, 1),
               activation='relu'))
    model.add(MaxPooling2D(pool_size=(2, 2), padding='same'))
    model.add(