Example #1
    # End of the network builder (from_raw): return the output layer and the
    # list of input layers so they can be stored in the model dump below.
    return l_out, l_ins


import os
import pickle

import numpy as np
import pandas as pd
import lasagne as nn

DUMP_PATH = '../kaggle_diabetic_retinopathy/dumps/2015_07_17_123003.pkl'
RAW_DUMP_PATH = '../kaggle_diabetic_retinopathy/dumps/2015_07_17_123003_PARAMSDUMP.pkl'
NEW_DUMP_PATH = '../kaggle_diabetic_retinopathy/dumps/2015_07_17_123003.updated.pkl'
IMG_DIR = '../train_ds2_crop/'

model_data = pickle.load(open(DUMP_PATH, 'rb'))

train_labels = pd.read_csv(
    '../kaggle_diabetic_retinopathy/data/trainLabels.csv')
batch_size = model_data['batch_size']
output_dim = 5
chunk_size = model_data['chunk_size']
patient_ids = sorted(set(get_img_ids_from_iter(train_labels.image)))
no_transfo_params = model_data['data_loader_params']['no_transfo_params']
if 'paired_transfos' in model_data:
    paired_transfos = model_data['paired_transfos']
else:
    paired_transfos = False

# Overwrite the stored layers with the rebuilt model (one that works with newer versions of Lasagne).
l_out, l_ins = from_raw()
model_data['l_out'] = l_out
model_data['l_ins'] = l_ins
with open(NEW_DUMP_PATH, 'wb') as fh:
    pickle.dump(model_data, fh, protocol=-1)
print('Dumped new model')
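
# A minimal follow-up sketch, assuming RAW_DUMP_PATH holds the parameter arrays
# saved with lasagne.layers.get_all_param_values(): reload the refreshed dump and
# push the saved weights back into the rebuilt network.
new_model_data = pickle.load(open(NEW_DUMP_PATH, 'rb'))
param_values = pickle.load(open(RAW_DUMP_PATH, 'rb'))
nn.layers.set_all_param_values(new_model_data['l_out'], param_values)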

output = nn.layers.get_output(l_out, deterministic=True)

# Fragment from inside a paired-patch generator loop: the second image of each
# pair (index 2 * k + 1) gets its transform parameters applied, and its shape and
# label are recorded before the chunk is yielded with one-hot (5-class) labels.
                transfo_params=transfo_params,
                rand_values=chosen_values if paired_transfos else None)

            chunk_shape[2 * k + 1] = chunk_x[2 * k + 1].shape
            chunk_y[2 * k + 1] = labels[idx][1]

        yield chunk_x, chunk_dim, np.eye(5)[chunk_y].astype('float32'), \
            chunk_shape
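
# Hypothetical usage sketch: a paired generator like this yields
# (chunk_x, chunk_dim, one-hot labels, chunk_shape); summing the one-hot rows
# gives per-class counts over the drawn chunks.
def tally_chunk_labels(gen):
    totals = np.zeros(5, dtype='float32')
    for chunk_x, chunk_dim, chunk_y_onehot, chunk_shape in gen:
        totals += chunk_y_onehot.sum(axis=0)
    return totals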

# Get rid of relative imports.
main_dir = os.path.abspath(os.path.dirname(__file__))

import pandas as p
# Get all train ids to know if patient id is train or test.
train_labels = p.read_csv(os.path.join(main_dir, 'data/trainLabels.csv'))
all_train_patient_ids = set(get_img_ids_from_iter(train_labels.image))
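
# Hypothetical helper illustrating the routing described above: patient ids found
# in trainLabels.csv are read from the train image directory, all others from the
# test directory (prefix defaults as used in the generator below).
def prefix_for_patient(patient_id,
                       prefix_train='data/train_ds5_crop/',
                       prefix_test='data/test_ds5_crop/'):
    return prefix_train if patient_id in all_train_patient_ids else prefix_test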


def patches_gen_pairs_pseudolabel(images, labels, p_x=80, p_y=80,
                                  num_channels=3, chunk_size=1024,
                                  num_chunks=100, rng=np.random,
                                  prefix_train='data/train_ds5_crop/',
                                  prefix_test='data/test_ds5_crop/',
                                  transfo_params=None,
                                  paired_transfos=False):
    # chunk_size // 2 patients are drawn per chunk; each contributes both eyes.
    num_patients = len(images)

    for n in xrange(num_chunks):
        indices = rng.randint(0, num_patients, chunk_size // 2)

        chunk_x = np.zeros((chunk_size, num_channels, p_x, p_y),
                           dtype='float32')  # dtype assumed
Example #3
import theano

# Compile the Theano prediction function for one minibatch index (the name
# 'compute_output' is assumed; idx and givens come from setup not shown here).
compute_output = theano.function(
    [idx],
    output,
    givens=givens,
    on_unused_input='ignore'
)
# Do transformations per patient instead?
if 'paired_transfos' in model_data:
    paired_transfos = model_data['paired_transfos']
else:
    paired_transfos = False
    
#print paired_transfos
train_labels = p.read_csv('data/trainLabels.csv')
#print train_labels.head(5)
# Get all patient ids.
patient_ids = sorted(set(get_img_ids_from_iter(train_labels.image)))
num_chunks = int(np.ceil((2 * len(patient_ids)) / float(chunk_size)))
# Where all the images are located: 
# it looks for [img_dir]/[patient_id]_[left or right].jpeg
#img_dir = '/storage/hpc_dmytro/Kaggle/DR/test/'
img_dir = '/storage/hpc_dmytro/Kaggle/DR/processed/run-stretch/test/'
#img_dir = '/storage/hpc_gagand87/train/jpeg/'
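
# Hypothetical helper showing the filename convention noted above.
def image_path(img_dir, patient_id, side):
    # side is 'left' or 'right'; files are stored as [patient_id]_[side].jpeg.
    return os.path.join(img_dir, '%s_%s.jpeg' % (patient_id, side))
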
from generators import DataLoader
data_loader = DataLoader()
# Reuse the loader parameters stored with the model, pointing them at the patient
# ids and labels to be scored and at the image directory chosen above.
new_dataloader_params = model_data['data_loader_params']
new_dataloader_params.update({'images_test': patient_ids})
new_dataloader_params.update({'labels_test': train_labels.level.values})
new_dataloader_params.update({'prefix_train': img_dir})
data_loader.set_params(new_dataloader_params)
def do_pred(test_gen):
    outputs = []
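
# Hedged sketch of how a prediction loop like do_pred might continue, assuming the
# test generator yields image chunks and that x_shared / compute_output come from
# the Theano setup above: copy each chunk into the shared variable, run the
# compiled function once per batch, and stack the per-batch outputs.
def do_pred_sketch(test_gen, x_shared, compute_output, batch_size):
    outputs = []
    for chunk_x in test_gen:
        x_shared.set_value(chunk_x)
        num_batches = chunk_x.shape[0] // batch_size
        for b in xrange(num_batches):
            outputs.append(compute_output(b))
    return np.vstack(outputs)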