Ejemplo n.º 1
0
# Load training hyperparameters from an already-open YAML config handle `f`
# (opened before this fragment — TODO confirm against the full file).
# NOTE(review): yaml.load without an explicit Loader is deprecated since
# PyYAML 5.1 and unsafe on untrusted input — prefer yaml.safe_load(f).
options = yaml.load(f)
f.close()

# --- hyperparameters read from the config ---
num_train = options['num_train']    # number of training samples to keep
num_val = options['num_val']        # number of validation samples to keep
num_filters, filter_size = options['filters'], options['filter_size']
weight = float(options['weight_scale'])    # weight-initialization scale
hidden_dim = options['hidden_dim']         # fully-connected hidden layer size
reg = float(options['reg'])                # regularization strength
num_epochs = options['num_epochs']
batch_size = options['batch_size']
learning_rate = float(options['learning_rate'])
lr_decay = float(options['lr_decay'])      # per-epoch learning-rate decay

# read in data
X_train, y_train, X_test, y_test, X_val, y_val = process_data.read_faces_csv(
    csv_file)

# Channels-first layout: (N, C, H, W); C == 1 per the input_dim below.
N_train, _, H, W = X_train.shape
N_val, _, _, _ = X_val.shape

# Keep only the first num_train / num_val samples for the solver.
data = {'X_train' : X_train[:num_train,:,:,:], 'y_train' : y_train[:num_train], \
  "X_val" : X_val[:num_val,:,:,:], "y_val" : y_val[:num_val] }

print "Done loading data"

# Single-channel input, 7 output classes (facial expressions).
model = ThreeLayerConvNet(input_dim=(1, H, W), num_classes=7, num_filters=num_filters, filter_size=filter_size, \
       weight_scale=weight, hidden_dim=hidden_dim, reg=reg)
solver = Solver(model,
                data,
                num_epochs=num_epochs,
Ejemplo n.º 2
0
# --- hard-coded hyperparameters for this run ---
num_val = 100           # samples held out for validation
# NOTE(review): num_train is used here but never assigned in this fragment —
# presumably defined just above; confirm against the full file.
num_total = num_val + num_train
num_filters = 32        # first conv layer filter count
num_filters2 = 64       # second conv layer filter count
filter_size = 5
num_classes = 7         # 7 facial-expression classes
weight_scale = 0.002088691359
hidden_dim = 500
reg = 0.000511417045343
num_epochs = 5
batch_size = 64
# NOTE(review): the 1e8 multiplier yields an effective rate of ~13513.9 —
# looks like a leftover experiment; verify this is intentional.
learning_rate = 0.000135139484703 * 100000000
lr_decay = 0.95

# read in data
# use_tensorflow=True presumably returns channels-last arrays (N, H, W, C),
# consistent with the shape unpack below — TODO confirm in process_data.
X_train, y_train, X_test, y_test, X_val, y_val = process_data.read_faces_csv(csv_file, num_total, use_tensorflow = True)

_, H, W, _ = X_train.shape

# Split data into training and validation
# (overwrites the X_val/y_val returned by read_faces_csv with the tail of
# X_train — the returned validation split is discarded).
X_val = X_train[-num_val:,:,:,:]
y_val = y_train[-num_val:]

X_train = X_train[:num_train,:,:,:]
y_train = y_train[:num_train]

# Convert y to one hot
y_train_one_hot = np.zeros((num_train, num_classes))
y_train_one_hot[np.arange(num_train), y_train.astype(int)] = 1

# NOTE(review): the matching one-hot scatter for y_val appears to be
# missing — the fragment is cut off after this allocation.
y_val_one_hot = np.zeros((num_val, num_classes))
# Load training hyperparameters from an already-open YAML config handle `f`
# (opened before this fragment — TODO confirm against the full file).
# NOTE(review): yaml.load without an explicit Loader is deprecated since
# PyYAML 5.1 and unsafe on untrusted input — prefer yaml.safe_load(f).
options = yaml.load(f)
f.close()

# --- hyperparameters read from the config ---
num_train = options['num_train']    # number of training samples to keep
num_val = options['num_val']        # number of validation samples to keep
num_filters, filter_size = options['filters'], options['filter_size']
weight = float(options['weight_scale'])    # weight-initialization scale
hidden_dim = options['hidden_dim']         # fully-connected hidden layer size
reg = float(options['reg'])                # regularization strength
num_epochs = options['num_epochs']
batch_size = options['batch_size']
learning_rate = float(options['learning_rate'])
lr_decay = float(options['lr_decay'])      # per-epoch learning-rate decay

# read in data
X_train, y_train, X_test, y_test, X_val, y_val = process_data.read_faces_csv(csv_file)


# create mirrored training image, and append on X_train
# Channels-first layout here: (N, C, rows, cols), per the unpack below.
N, _, row, col = X_train.shape

print(X_train[0])

# Horizontally flip every image by reversing the column (last) axis.
# This replaces the original three nested Python loops (O(N*row*col) of
# interpreter overhead) with a single vectorized numpy operation, and it
# fixes a latent bug: the loop only swapped columns c in [0, col/2), so
# for an odd `col` the middle column was left at the zeros the output
# buffer was initialized with instead of being copied.
# astype(np.float64) keeps the result dtype identical to the original
# np.zeros(X_train.shape) buffer regardless of X_train's dtype.
X_train_mirrored = X_train[:, :, :, ::-1].astype(np.float64)
# Mirroring does not change the expression class, so labels are reused
# as-is (shared reference, same as the original code).
y_train_mirrored = y_train