Example #1
0
    print 'batch_size_train', batch_size_train
    print 'batch_size_test', batch_size_test
    print 'lambda_r:', lambda_r
    print 'vali_test', vali_test
    print 'epoch', epoch
    print


'''*************************main****************************'''
'''*************************main****************************'''
for i in range(1):
    # dataset list
    dataset_list = ['', '_Women', '_Men', '_CLothes', '_Shoes', '_Jewelry']
    # load data
    [train_data, train_data_aux, validate_data, test_data, P,
     Q] = readdata(dataset_list[dataset])
    # load feature
    [E, F] = get_feature(dataset)
    '''for i in range(len(E)):
        E[i] = E[i] / math.sqrt(mat(E[i]) * mat(E[i]).T)
    for i in range(len(F)):
        F[i] = F[i] / math.sqrt(mat(F[i]) * mat(F[i]).T)'''
    E = E[:, 0:K1]
    F = F[:, 0:K2]

    Fc = get_feature_cnn(dataset_list[dataset])
    Kc = len(Fc[0])

    # select validate or test dataset
    if vali_test == 0:
        Test = validate_data
Example #2
0
#This assumes that the input data is of length 4

#my packages
import readdata

#other packages
import numpy as np
import tensorflow as tf

#Get Data
trainx, trainy = readdata.readdata(999)

testx, testy = readdata.readdata(100)

numtrain = len(trainx)
numtest = len(testx)

#hyperparameters
learning_rate = 0.5
epochs = 30

#placeholders
X = tf.placeholder("float32")
Y = tf.placeholder("float32")

#model weights
#tf.variables are trainable by default
W = tf.Variable(np.array(np.random.rand(1, 4), dtype='float32'), name="weight")

#pred = tf.reduce_sum(tf.multiply(X, W))
pred = tf.matmul(W, X)
import numpy as np

# importing the database
import readdata
df = readdata.readdata('database.sqlite')
print(df.head())

#Shuffle the rows of df so we get a distributed sample when we display top few rows
df = df.reindex(np.random.permutation(df.index))

# Defining the X anf y parameters
X = df.iloc[:,1:]   # 'other than overall rating, i.e. column 1'
y = df.iloc[:,0]    #  Need to predict 'Overall rating' 

# Identifying the unique values for preferred_foot colomns
print("Unique values in preferred_foot feature are: ", end=" ")
print(df.preferred_foot.unique())
print("Lets apply one Hot encoding to preferred_foot feature")

# Applying One hot encoding on only catagorical columns, i.e. preferred_foot
from sklearn.preprocessing import LabelEncoder
label_encoder = LabelEncoder()
X.preferred_foot = label_encoder.fit_transform(X.preferred_foot)

# Splitting the train and test data
from sklearn.model_selection import train_test_split
X_train,X_test,y_train,y_test = train_test_split(X,y,test_size=0.10,random_state=0)

# Find the most significant features using backward elemination 
print('-'*50)
import backwardEle
Example #4
0
#my packages
import readdata

#other packages
import numpy as np
import tensorflow as tf
import pickle


#######change lbl to a list of vectors, not integers###########




#Get Data
trainimg,trainlbl=readdata.readdata('train')
testimg,testlbl=readdata.readdata('test')

xshape,yshape,dummy = np.shape(trainimg[0])
numtrain = len(trainimg)
numtest = len(testimg)


#hyperparameters
learning_rate = 0.0001#3e-3 #tf documentation says default = 0.001 
beta1=0.94 #0.9
beta2=0.999 #0.999
epsilon=1e-8 #1e-8


dropout_rate = 0.80