Example 1
from keras.optimizers import Adam, RMSprop
import numpy as np
import pandas as pd
from define_model import create_model
from utilities import get_image_tensor, get_callbacks
from sklearn.model_selection import StratifiedKFold, train_test_split

# Set up data
iceberg = pd.read_json('data/train.json')
train_data, test_data = train_test_split(iceberg,
                                         train_size=.75,
                                         random_state=123)

train_images = get_image_tensor(train_data)
test_images = get_image_tensor(test_data)

# Run Experiments on adam lr
lrs = [0.002, 0.001, 0.0005]
for i, lr in enumerate(lrs):
    model = create_model()
    optimizer = Adam(lr=lr)
    model.compile(optimizer=optimizer,
                  loss='binary_crossentropy',
                  metrics=['accuracy'])

    hist = model.fit(train_images,
                     train_data.is_iceberg,
                     epochs=1,
                     batch_size=101,
                     callbacks=get_callbacks(
                         'data/train_weights_adam' + str(i) + '.hdf5', 4),
                     # validation_data assumed here, mirroring the later runs
                     validation_data=(test_images, test_data.is_iceberg))
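
get_callbacks is imported from utilities but never defined in these scripts. A minimal sketch, assuming the path argument feeds a ModelCheckpoint and the integer is an EarlyStopping patience (both assumptions, not the actual helper):

from keras.callbacks import EarlyStopping, ModelCheckpoint

def get_callbacks(weights_path, patience):
    # Assumed behaviour: save the best validation-loss weights to weights_path
    # and stop training after `patience` epochs without improvement.
    checkpoint = ModelCheckpoint(weights_path, monitor='val_loss',
                                 save_best_only=True)
    early_stop = EarlyStopping(monitor='val_loss', patience=patience)
    return [checkpoint, early_stop]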
Example 2
import pandas as pd
from sklearn.model_selection import train_test_split
from keras.models import Sequential
from keras.layers import Dense, Conv2D, MaxPooling2D, Flatten, Dropout
from keras.optimizers import Adam
from keras.callbacks import ModelCheckpoint
from utilities import get_image_tensor
from define_models import create_model

score_data_location = 'data/test.hdf'

hyper_p = ('elu', 3, 2)
model = create_model(2, False, False)

model.load_weights('data/train_weightsnone_.hdf5')

model.compile(optimizer=Adam(), loss='binary_crossentropy',
              metrics=['accuracy'])

submission_data = pd.read_hdf(score_data_location)
#submission_data.to_hdf('data/test.hdf', 'data', mode = 'w')
submission = pd.DataFrame()
submission['id'] = submission_data['id']
submission_data = get_image_tensor(submission_data, extra_channel='none')

results = model.predict(submission_data)
results = results.reshape(results.shape[0])

submission['is_iceberg'] = results
submission.to_csv('data/submission.csv', index=False)
#print(model.evaluate(x_test, test_target))
# Decent improvement so far, up to 71% test set
Example 3
from keras.optimizers import Adam, RMSprop
import numpy as np
import pandas as pd
from define_model import create_model
from utilities import get_image_tensor, get_callbacks
from sklearn.model_selection import StratifiedKFold, train_test_split

model = create_model(2, False, False)
optim = RMSprop(lr=0.0001)
model.compile(optimizer=optim,
              loss='binary_crossentropy',
              metrics=['accuracy'])

iceberg = pd.read_json('data/train.json')
iceberg['inc_angle'] = iceberg['inc_angle'].replace('na', -1).astype(float)
train_data, test_data = train_test_split(iceberg,
                                         train_size=.75,
                                         random_state=123)

train_images = get_image_tensor(train_data, 'none')
test_images = get_image_tensor(test_data, 'none')

hist = model.fit(train_images,
                 train_data.is_iceberg,
                 epochs=30,
                 batch_size=13,
                 callbacks=get_callbacks('data/train_weights.hdf5', 5),
                 validation_data=(test_images, test_data.is_iceberg))

print(hist)
pd.DataFrame(hist.history).to_csv('data/smaller_batch.csv')
Example 4
# Imports assumed, following the module layout of the earlier scripts.
import pandas as pd
from keras.optimizers import Adam
from define_model import create_model
from utilities import get_image_tensor

score_data_location = '../architecture_experiments/data/test.hdf'

model = create_model(nchannels=2,
                     base_size=42,
                     drop=0.06767,
                     activation='relu',
                     normalize_batches=False,
                     angle=False)

model.load_weights('data/train_weights_9.hdf5')

model.compile(optimizer=Adam(),
              loss='binary_crossentropy',
              metrics=['accuracy'])

submission_data = pd.read_hdf(score_data_location)
#submission_data.to_hdf('data/test.hdf', 'data', mode = 'w')
submission = pd.DataFrame()
submission['id'] = submission_data['id']
submission_data = get_image_tensor(submission_data,
                                   extra_channel='none',
                                   normalize=False)

results = model.predict(submission_data)
results = results.reshape(results.shape[0])

submission['is_iceberg'] = results
submission.to_csv('data/submission.csv', index=False)
#print(model.evaluate(x_test, test_target))
# Decent improvement so far, up to 71% test set
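
create_model comes from define_model and its architecture is not shown anywhere above. The sketch below fixes only the signature implied by the calls (nchannels, base_size, drop, activation, normalize_batches, angle); the layer stack, the default values, and the choice to ignore the angle input are assumptions, not the actual network.

from keras.models import Sequential
from keras.layers import (Conv2D, MaxPooling2D, Flatten, Dense, Dropout,
                          BatchNormalization)

def create_model(nchannels=2, normalize_batches=False, angle=False,
                 base_size=32, drop=0.2, activation='relu'):
    # Hypothetical small CNN over (75, 75, nchannels) inputs; the angle flag
    # is accepted but unused in this sketch.
    model = Sequential()
    model.add(Conv2D(base_size, (3, 3), activation=activation,
                     input_shape=(75, 75, nchannels)))
    if normalize_batches:
        model.add(BatchNormalization())
    model.add(MaxPooling2D((2, 2)))
    model.add(Dropout(drop))
    model.add(Conv2D(base_size * 2, (3, 3), activation=activation))
    model.add(MaxPooling2D((2, 2)))
    model.add(Flatten())
    model.add(Dense(base_size * 2, activation=activation))
    model.add(Dropout(drop))
    model.add(Dense(1, activation='sigmoid'))
    return model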
Example 5
from keras.optimizers import RMSprop
import pandas as pd
from define_model import create_model
from utilities import get_image_tensor, get_callbacks
from sklearn.model_selection import StratifiedKFold, train_test_split

# Set up data
iceberg = pd.read_json('data/train.json')
iceberg['inc_angle'] = iceberg['inc_angle'].replace('na', -1)
train_data, test_data = train_test_split(iceberg,
                                         train_size=.75,
                                         random_state=123)

pars = [(chan, norm, angle) for chan in ('none', 'diff', 'avg')
        for angle in (True, False) for norm in (True, False)]

# Run Experiments on
for p in pars:
    train_images = get_image_tensor(train_data, extra_channel=p[0])
    test_images = get_image_tensor(test_data, extra_channel=p[0])

    if p[0] == 'none':
        model = create_model(2, p[1], p[2])
    else:
        model = create_model(3, p[1], p[2])

    optimizer = RMSprop(lr=0.001)
    model.compile(optimizer=optimizer,
                  loss='binary_crossentropy',
                  metrics=['accuracy'])

    pars_suffix = p[0] + '_' + 'norm_' * p[1] + 'angle_' * p[2]
    print(pars_suffix)
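
get_image_tensor is the other shared helper that never appears in these files. A sketch under assumptions: the data are the usual 75x75 band_1/band_2 radar images, 'avg' and 'diff' append a derived third channel, and normalize standardizes the tensor; the real implementation may differ.

import numpy as np

def get_image_tensor(df, extra_channel='none', normalize=True):
    # Reshape the flattened bands into (n, 75, 75) arrays.
    band_1 = np.array([np.array(b).reshape(75, 75) for b in df['band_1']])
    band_2 = np.array([np.array(b).reshape(75, 75) for b in df['band_2']])
    channels = [band_1, band_2]
    if extra_channel == 'avg':
        channels.append((band_1 + band_2) / 2)
    elif extra_channel == 'diff':
        channels.append(band_1 - band_2)
    images = np.stack(channels, axis=-1)  # (n, 75, 75, 2 or 3)
    if normalize:
        images = (images - images.mean()) / images.std()
    return images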
Example 6
}

# Set up data
iceberg = pd.read_json('data/train.json')
train_data, test_data = train_test_split(iceberg,
                                         train_size=.8,
                                         random_state=123)

# Run Experiments on
for i in range(hyperparameter_bounds['n_experiments']):

    hyp = get_hyperparameters(hyperparameter_bounds)
    print(hyp)

    train_images = get_image_tensor(train_data,
                                    extra_channel='none',
                                    normalize=False)
    test_images = get_image_tensor(test_data,
                                   extra_channel='none',
                                   normalize=False)

    model = create_model(nchannels=2,
                         base_size=hyp['base_size'],
                         drop=hyp['dropout'],
                         activation=hyp['activation'],
                         normalize_batches=hyp['bn'],
                         angle=False)

    model.compile(optimizer=Adam(lr=hyp['lr']),
                  loss='binary_crossentropy',
                  metrics=['accuracy'])
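
Neither get_hyperparameters nor the full hyperparameter_bounds dict (cut off above) is shown. A minimal sketch of a random-search draw, assuming bounds stores (low, high) pairs and a list of candidate activations under the keys used in this loop:

import numpy as np

def get_hyperparameters(bounds):
    # Hypothetical sampler; key names match the hyp[...] lookups above.
    return {
        'base_size': int(np.random.randint(*bounds['base_size'])),
        'dropout': float(np.random.uniform(*bounds['dropout'])),
        'activation': np.random.choice(bounds['activation']),
        'bn': bool(np.random.randint(2)),
        'lr': float(np.random.uniform(*bounds['lr'])),
    }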
Example 7
# Imports assumed, following the module layout of the earlier scripts.
import pandas as pd
from keras.optimizers import Adam
from define_model import create_model
from utilities import get_image_tensor
from sklearn.model_selection import train_test_split

iceberg = pd.read_json('data/train.json')
iceberg['inc_angle'] = iceberg['inc_angle'].replace('na', -1)
train_data, test_data = train_test_split(iceberg,
                                         train_size=.8,
                                         random_state=123)

pars = [(learn, normalize) for learn in (0.0005, 0.0002, 0.0001)
        for normalize in (True, False)]
j = 20
# Run Experiments on
for p in pars:

    j += 1

    train_images = get_image_tensor(train_data,
                                    extra_channel='avg',
                                    normalize=p[1])
    test_images = get_image_tensor(test_data,
                                   extra_channel='avg',
                                   normalize=p[1])

    model = create_model(3, False, False)

    optimizer = Adam(lr=p[0])

    model.compile(optimizer=optimizer,
                  loss='binary_crossentropy',
                  metrics=['accuracy'])

    hist = model.fit(train_images,
                     train_data.is_iceberg,