Code Example #1
File: plot_dreams.py Project: sonyeric/kaggle-dr
import cPickle  # Python 2; use pickle on Python 3

# DataStream and DreamNet are defined elsewhere in the kaggle-dr project.
def load_column(model_file, batch_size, layer_idx_of_interest, min_filter_idx,
                max_filter_idx, train_dataset, train_labels_csv_path, center,
                normalize, train_flip, test_dataset, random_seed,
                valid_dataset_size, filter_shape, cuda_convnet):
    print("Loading Model...")
    # Read the pickled hyperparameter header saved with the model. The stored
    # batch size is discarded in favor of the caller's batch_size argument.
    with open(model_file, 'rb') as f:
        (_batch_size, init_learning_rate, momentum, leak_alpha, model_spec,
         loss_type, num_output_classes, pad, image_shape) = cPickle.load(f)
    data_stream = DataStream(train_image_dir=train_dataset,
                             train_labels_csv_path=train_labels_csv_path,
                             image_shape=image_shape,
                             batch_size=batch_size,
                             cache_size_factor=1,
                             center=center,
                             normalize=normalize,
                             train_flip=train_flip,
                             test_image_dir=test_dataset,
                             random_seed=random_seed,
                             valid_dataset_size=valid_dataset_size)

    column = DreamNet(layer_idx_of_interest, min_filter_idx, max_filter_idx,
                      data_stream, batch_size, init_learning_rate, momentum,
                      leak_alpha, model_spec, loss_type, num_output_classes,
                      pad, image_shape, filter_shape, cuda_convnet)
    column.restore(model_file)
    return column
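
A hypothetical usage sketch follows; the file path, layer and filter indices, and flag values are illustrative placeholders, not values taken from the project.

# Hypothetical call; every argument value below is a placeholder.
column = load_column(model_file='saved_model.pkl', batch_size=1,
                     layer_idx_of_interest=3, min_filter_idx=0,
                     max_filter_idx=16, train_dataset='data/train',
                     train_labels_csv_path='data/trainLabels.csv',
                     center=0, normalize=0, train_flip=0,
                     test_dataset=None, random_seed=1991,
                     valid_dataset_size=10, filter_shape='c01b',
                     cuda_convnet=1)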
Code Example #2
File: predict.py Project: sainiudit/kaggle-dr
import cPickle  # Python 2; use pickle on Python 3

# DataStream and VGGNet are defined elsewhere in the kaggle-dr project.
def load_column(model_file, train_dataset, center, normalize, train_flip,
                test_dataset, random_seed, valid_dataset_size, filter_shape):
    print("Loading Model...")
    # Read the pickled hyperparameter header saved with the model.
    with open(model_file, 'rb') as f:
        (batch_size, init_learning_rate, momentum, leak_alpha, model_spec,
         loss_type, num_output_classes, pad, image_shape) = cPickle.load(f)

    data_stream = DataStream(train_image_dir=train_dataset,
                             batch_size=batch_size,
                             image_shape=image_shape,
                             center=center,
                             normalize=normalize,
                             train_flip=train_flip,
                             test_image_dir=test_dataset,
                             random_seed=random_seed,
                             valid_dataset_size=valid_dataset_size)
    column = VGGNet(data_stream, batch_size, init_learning_rate, momentum,
                    leak_alpha, model_spec, loss_type, num_output_classes, pad,
                    image_shape, filter_shape)
    column.restore(model_file)
    return column
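
Both load_column variants (and init_and_train below) begin by unpickling the same nine-field hyperparameter header from the model file. A minimal sketch of how that step could be shared; load_model_header is a hypothetical helper name, not a function from either project.

import cPickle  # Python 2; use pickle on Python 3

def load_model_header(model_file):
    # Returns the nine hyperparameters pickled at the front of a saved model:
    # (batch_size, init_learning_rate, momentum, leak_alpha, model_spec,
    #  loss_type, num_output_classes, pad, image_shape)
    with open(model_file, 'rb') as f:
        return cPickle.load(f)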
Code Example #3
import time
import uuid

import cPickle  # Python 2; use pickle on Python 3
import numpy

# DataStream, VGGNet, load_model_specs, save_results,
# ACTUAL_TRAIN_DR_PROPORTIONS, and UnsupportedPredictedClasses are defined
# elsewhere in this project.
def init_and_train(network, init_learning_rate, momentum, max_epochs, train_dataset,
                   train_labels_csv_path, batch_size, leak_alpha, center, normalize, amplify,
                   as_grey, num_output_classes, decay_patience, decay_factor,
                   decay_limit, loss_type, validations_per_epoch, train_flip,
                   shuffle, test_dataset, random_seed, valid_dataset_size,
                   noise_decay_start, noise_decay_duration, noise_decay_severity,
                   valid_flip, test_flip, sample_class, custom_distribution,
                   train_color_cast, valid_color_cast, test_color_cast,
                   color_cast_range, override_input_size, model_file, filter_shape,
                   cache_size_factor, cuda_convnet, pre_train_crop, train_crop, valid_test_crop,
                   image_extension, print_confusion_mat):
    runid = "%s-%s-%s" % (str(uuid.uuid4())[:8], network, loss_type)
    print("[INFO] Starting runid %s" % runid)
    if custom_distribution and sample_class: # lame hardcode
        print("[INFO] %.2f current epochs equals 1 BlockDesigner epoch" %
              ((274.0 * numpy.array(custom_distribution)) /
               numpy.array(ACTUAL_TRAIN_DR_PROPORTIONS))[sample_class])

    model_spec, image_shape, pad = load_model_specs(network, as_grey, override_input_size)
    data_stream = DataStream(train_image_dir=train_dataset,
                             train_labels_csv_path=train_labels_csv_path,
                             image_shape=image_shape,
                             cache_size_factor=cache_size_factor,
                             batch_size=batch_size, center=center,
                             normalize=normalize, amplify=amplify,
                             train_flip=train_flip, shuffle=shuffle,
                             test_image_dir=test_dataset,
                             random_seed=random_seed,
                             valid_dataset_size=valid_dataset_size,
                             valid_flip=valid_flip, test_flip=test_flip,
                             sample_class=sample_class,
                             custom_distribution=custom_distribution,
                             train_color_cast=train_color_cast,
                             valid_color_cast=valid_color_cast,
                             test_color_cast=test_color_cast,
                             color_cast_range=color_cast_range,
                             pre_train_crop=pre_train_crop,
                             train_crop=train_crop,
                             valid_test_crop=valid_test_crop,
                             image_extension=image_extension)

    if model_file:
        # Restore the hyperparameters pickled with the saved model; these
        # override the corresponding arguments passed to this function.
        with open(model_file, 'rb') as f:
            (batch_size, init_learning_rate, momentum, leak_alpha, model_spec,
             loss_type, num_output_classes, pad, image_shape) = cPickle.load(f)

    column = VGGNet(data_stream, batch_size, init_learning_rate, momentum,
                    leak_alpha, model_spec, loss_type, num_output_classes, pad,
                    image_shape, filter_shape, cuda_convnet=cuda_convnet,
                    runid=runid)
    if model_file:
        column.restore(model_file)

    try:
        column.train(max_epochs, decay_patience, decay_factor, decay_limit,
                     noise_decay_start, noise_decay_duration,
                     noise_decay_severity, validations_per_epoch,
                     print_confusion_mat)
    except KeyboardInterrupt:
        print("[ERROR] User terminated Training, saving results")
    except UnsupportedPredictedClasses as e:
        print("[ERROR] UnsupportedPredictedClasses {}, saving results".format(e.args[0]))
    column.save("%s_final" % runid)
    save_results(runid, [[column.historical_train_losses,
                          column.historical_val_losses,
                          column.historical_val_kappas,
                          column.n_train_batches],
                         [column.learning_rate_decayed_epochs]])
    print(time.strftime("Finished at %H:%M:%S on %Y-%m-%d"))
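
Every example above unpickles the same nine-field header from the front of the model file, so the save side presumably writes that header first. A minimal sketch of the write-side counterpart, under the assumption that the projects' actual save method also stores network weights after this header; save_model_header is a hypothetical name.

import cPickle  # Python 2; use pickle on Python 3

# Hypothetical helper; the real save method in these projects may store more.
def save_model_header(path, batch_size, init_learning_rate, momentum,
                      leak_alpha, model_spec, loss_type, num_output_classes,
                      pad, image_shape):
    with open(path, 'wb') as f:
        cPickle.dump((batch_size, init_learning_rate, momentum, leak_alpha,
                      model_spec, loss_type, num_output_classes, pad,
                      image_shape), f)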