    patch_size = (8, 8)

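    # Hyperparameters that are unpacked into the train() and test() calls below.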
    cs_args = {
        "train_args": {
         "L1_reg": 1e-06,
         "learning_rate": 0.05,
         "L2_reg": 1e-05,
         "nepochs": 1,
         "cost_type": "crossentropy",
         "save_exp_data": False,
         "batch_size": 200,
         "normalize_weights": False
        },
        "test_args": {
         "save_exp_data": False,
         "batch_size": 200
        }
    }

    no_of_patches = 64
    print "Starting the pre-training."

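    # MLP applied to each 8x8 input patch (64 inputs, 11 output classes per patch).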
    prmlp = PatchBasedMLP(pre_input,
                          n_in=patch_size[0] * patch_size[1],
                          n_hiddens=[2048, 2048],
                          n_out=11,
                          patch_size=patch_size)

    incremental_data_experiment(prmlp, train_datasets, test_datasets,
                                no_of_patches=no_of_patches,
                                patch_size=patch_size, **cs_args)

    print "Starting the cross-validation"

    x = T.matrix('x')

    n_hiddens = [2048]

    train_set_patches, train_set_pre, train_set_labels = ds.Xtrain_patches, ds.Xtrain_presences, ds.Ytrain
    test_set_patches, test_set_pre, test_set_labels = ds.Xtest_patches, ds.Xtest_presences, ds.Ytest

    prmlp = PatchBasedMLP(x,
                          n_in=patch_size[0] * patch_size[1],
                          n_hiddens=n_hiddens,
                          n_out=11,
                          no_of_patches=no_of_patches,
                          activation=NeuralActivations.Rectifier,
                          use_adagrad=False,
                          quiet=True)

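    # Train the patch MLP and save the probabilities it produces on the training set.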
    costs, pretrain_probs = prmlp.train(train_set_patches, train_set_pre,
                                        **cs_args["train_args"])

    save_probs((pretrain_probs, train_set_labels), train_file)

    print "Testing on the training dataset."
    fin_test_score, post_test_train_probs = prmlp.test(train_set_patches,
                                                       train_set_pre,
                                                       **cs_args["test_args"])
    save_probs((post_test_train_probs, train_set_labels), train_test_file)
Example #3
            "batch_size": 200
        }
    }

    print "Starting the cross-validation"

    x = T.matrix('x')

    n_hiddens = [2048]

    train_set_patches, train_set_pre = ds.Xtrain_patches, ds.Xtrain_presences
    test_set_patches, test_set_pre = ds.Xtest_patches, ds.Xtest_presences

    prmlp = PatchBasedMLP(x,
                          n_in=patch_size[0] * patch_size[1],
                          n_hiddens=n_hiddens,
                          n_out=11,
                          no_of_patches=no_of_patches,
                          activation=NeuralActivations.Rectifier,
                          use_adagrad=False,
                          quiet=True)

    prmlp.train(train_set_patches, train_set_pre, **cs_args["train_args"])
    prmlp.save_data()

    print "Testing on the training dataset."
    prmlp.test(train_set_patches, train_set_pre, **cs_args["test_args"])

    print "Testing on the test dataset."
    prmlp.test(test_set_patches, test_set_pre, **cs_args["test_args"])
         "save_exp_data": False,
         "batch_size": 200
        }
    }

    print "Starting the cross-validation"

    x = T.matrix('x')

    n_hiddens = [2048]

    train_set_patches, train_set_pre, train_set_labels = ds.Xtrain_patches, ds.Xtrain_presences, ds.Ytrain
    test_set_patches, test_set_pre, test_set_labels = ds.Xtest_patches, ds.Xtest_presences, ds.Ytest

    prmlp = PatchBasedMLP(x,
                          n_in=patch_size[0] * patch_size[1],
                          n_hiddens=n_hiddens,
                          n_out=11,
                          no_of_patches=no_of_patches,
                          activation=NeuralActivations.Rectifier,
                          use_adagrad=False,
                          quiet=True)

    costs, pretrain_probs = prmlp.train(train_set_patches, train_set_pre, **cs_args["train_args"])

    save_probs((pretrain_probs, train_set_labels), train_file)

    print "Testing on the training dataset."
    fin_test_score, post_test_train_probs = prmlp.test(train_set_patches, train_set_pre, **cs_args["test_args"])
    save_probs((post_test_train_probs, train_set_labels), train_test_file)

    print "Testing on the test dataset."
    fin_test_score, post_test_probs = prmlp.test(test_set_patches, test_set_pre, **cs_args["test_args"])
    save_probs((post_test_probs, test_set_labels), test_file)

Example #5
    data_path_40k = "/RQusagers/gulcehre/dataset/pentomino/pieces/pento64x64_40k_seed_39112222.npy"
    data_path = "/RQusagers/gulcehre/dataset/pentomino/experiment_data/pento64x64_80k_seed_39112222.npy"
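    # Only data_path_40k is loaded below; data_path goes unused in this snippet.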

    patch_size = (8, 8)
    ds.setup_pretraining_dataset(data_path=data_path_40k,
                                 patch_size=patch_size,
                                 normalize_inputs=False)

    x = T.matrix('x')
    n_hiddens = [1024, 768]
    no_of_patches = 64
    no_of_classes = 11

    prmlp = PatchBasedMLP(x,
                          n_in=patch_size[0] * patch_size[1],
                          n_hiddens=n_hiddens,
                          n_out=no_of_classes,
                          no_of_patches=no_of_patches,
                          activation=NeuralActivations.Rectifier,
                          use_adagrad=False)

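    # Every other entry of prmlp.params, presumably the layer weight matrices
    # (assuming the parameter list alternates weights and biases).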
    params = [prmlp.params[0], prmlp.params[2], prmlp.params[4]]

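    # Second-stage MLP over the concatenated per-patch outputs
    # (n_in = no_of_patches * no_of_classes = 64 * 11).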
    post_mlp = PostMLP(x,
                       n_in=no_of_patches * no_of_classes,
                       n_hiddens=n_hiddens,
                       n_out=1,
                       use_adagrad=False)

    pre_training(patch_mlp=prmlp, post_mlp=post_mlp, ds=ds)
Example #6
         "batch_size": 200,
         "normalize_weights": False
        },
        "test_args": {
         "save_exp_data": False,
         "batch_size": 200
        }
    }

    print "Starting the cross-validation"

    x = T.matrix('x')

    n_hiddens = [2048]

    train_set_patches, train_set_pre = ds.Xtrain_patches, ds.Xtrain_presences
    test_set_patches, test_set_pre = ds.Xtest_patches, ds.Xtest_presences

    prmlp = PatchBasedMLP(x,
                          n_in=patch_size[0] * patch_size[1],
                          n_hiddens=n_hiddens,
                          n_out=11,
                          no_of_patches=no_of_patches,
                          activation=NeuralActivations.Rectifier,
                          use_adagrad=False,
                          quiet=True)

    prmlp.train(train_set_patches, train_set_pre, **cs_args["train_args"])
    prmlp.save_data()

    print "Testing on the training dataset."
    prmlp.test(train_set_patches, train_set_pre, **cs_args["test_args"])

    print "Testing on the test dataset."
    prmlp.test(test_set_patches, test_set_pre, **cs_args["test_args"])
Example #7
if __name__ == "__main__":
    print "Task has just started."
    print "Loading the dataset"
    ds = Dataset()
    patch_size = (8, 8)

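    # Dataset paths on the compute cluster; only data_new_40k is loaded below.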
    ds_path = \
    "/RQusagers/gulcehre/dataset/pentomino/experiment_data/pento64x64_80k_seed_39112222.npy"
    data_new = \
    "/RQusagers/gulcehre/dataset/pentomino/rnd_pieces/pento64x64_5k_seed_43112222_64patches_rnd.npy"

    data_new_40k = \
    "/RQexec/gulcehre/datasets/pentomino/pento_64x64_8x8patches/pento64x64_40k_64patches_seed_975168712_64patches.npy"

    ds.setup_pretraining_dataset(data_path=data_new_40k,
                                 patch_size=patch_size,
                                 normalize_inputs=False)
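    # Symbolic Theano input variable for the patch MLP.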
    pre_input = T.matrix('pre_input')
    n_hiddens = [2048]

    prmlp = PatchBasedMLP(pre_input,
                          n_in=8 * 8,
                          n_hiddens=n_hiddens,
                          n_out=11,
                          no_of_patches=64,
                          activation=NeuralActivations.Rectifier,
                          use_adagrad=False)

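    # A CSVM takes the place of the PostMLP used as the second stage in the earlier example.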
    csvm = CSVM()
    pre_training(prmlp, csvm, ds)