Example #1
        # set up the model
        dnn_shared = None
        shared_layers = []
        hidden_layers = shared_layers_sizes + task_specific_sizes[n]
        # reuse the shared layers of the first network
        if n > 0:
            dnn_shared = dnn_array[0]
            shared_layers = list(range(shared_layers_num))

        # create the network for the task
        # the input dropout factor and the general dropout factor are tunable;
        # see the DNNDropout class
        if shareLayers:
            dnn = DNNDropout(np_rng=np_rng, theano_rng=theano_rng, hidden_layers_sizes=hidden_layers,
                            n_ins=input_size, n_outs=output_size,
                            input_dropout_factor=0.0, dropout_factor=0.0,
                            dnn_shared=dnn_shared, shared_layers=shared_layers)
        else:
            dnn = DNNDropout(np_rng=np_rng, theano_rng=theano_rng, hidden_layers_sizes=hidden_layers,
                            n_ins=input_size, n_outs=output_size,
                            input_dropout_factor=0.1, dropout_factor=0.5)
        # add dnn and the functions to the list
        dnn_array.append(dnn)

    # # consider the tasks which have nonzero learning rate
    # active_tasks = [n for n in xrange(num_tasks)]

    log('> ... bootstrapping all tasks\' datasets and building the functions')

    # keep track of the training error in order to create the train/validation
    # curve
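
The `dnn_shared`/`shared_layers` arguments implement hard parameter sharing: every task network after the first points at the first network's bottom layers instead of allocating its own weights. Below is a minimal NumPy sketch of that idea, not the actual `DNNDropout` implementation; all names and sizes in it are illustrative.

    import numpy as np

    rng = np.random.default_rng(0)

    def make_layers(sizes, shared=None):
        # Build one weight matrix per consecutive size pair; for the
        # leading layers, reuse the matrices passed in via `shared`.
        layers = []
        for i in range(len(sizes) - 1):
            if shared is not None and i < len(shared):
                layers.append(shared[i])  # same object, not a copy
            else:
                layers.append(rng.standard_normal((sizes[i], sizes[i + 1])))
        return layers

    shared_layers_sizes = [16, 32]     # the common trunk
    task_specific_sizes = [[8], [12]]  # per-task heads
    dnn_array = []
    for n, head in enumerate(task_specific_sizes):
        hidden_layers = shared_layers_sizes + head
        shared = dnn_array[0][:len(shared_layers_sizes) - 1] if n > 0 else None
        dnn_array.append(make_layers(hidden_layers, shared))

    assert dnn_array[0][0] is dnn_array[1][0]  # trunk weights are shared objects

Because the trunk matrices are the same Python objects across tasks, a gradient step taken while training any one task moves the shared parameters for all of them, which is exactly why the per-task heads are the only independent parts.
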
Example #2
        # set up the model
        dnn_shared = None
        shared_layers = []
        hidden_layers = shared_layers_sizes + task_specific_sizes[n]
        # reuse the shared layers of the first network
        if n > 0:
            dnn_shared = dnn_array[0]
            shared_layers = list(range(shared_layers_num))

        # create the network for the task
        # the input dropout factor and the general dropout factor are tunable;
        # see the DNNDropout class
        if shareLayers:
            dnn = DNNDropout(np_rng=np_rng, theano_rng=theano_rng, hidden_layers_sizes=hidden_layers,
                            n_ins=input_size, n_outs=output_size,
                            input_dropout_factor=0.0, dropout_factor=0.0,
                            dnn_shared=dnn_shared, shared_layers=shared_layers)
        else:
            dnn = DNNDropout(np_rng=np_rng, theano_rng=theano_rng, hidden_layers_sizes=hidden_layers,
                            n_ins=input_size, n_outs=output_size,
                            input_dropout_factor=0.1, dropout_factor=0.5)
        # add dnn and the functions to the list
        dnn_array.append(dnn)

    # # consider the tasks which have nonzero learning rate
    # active_tasks = [n for n in xrange(num_tasks)]

    test_in, test_out, test_tasks = get_test_data()
    # per-row layout: [task_id | input features | target]
    complete = np.hstack((test_tasks.reshape((-1, 1)), test_in, test_out.reshape((-1, 1))))
    total = 0.0
    testin = []
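
For reference, the `np.hstack` call arranges each row of `complete` as the task id, then the input features, then the target. A small shape check under assumed dimensions follows; `get_test_data` itself is not shown above, so the arrays here are hypothetical stand-ins.

    import numpy as np

    # Hypothetical stand-ins for get_test_data(): 5 samples, 3 features.
    test_in = np.random.rand(5, 3)           # 2-D inputs
    test_out = np.random.randint(0, 2, 5)    # 1-D targets
    test_tasks = np.random.randint(0, 4, 5)  # 1-D task ids

    # Row layout: [task_id | features ... | target]
    complete = np.hstack((test_tasks.reshape((-1, 1)),
                          test_in,
                          test_out.reshape((-1, 1))))
    assert complete.shape == (5, 5)  # 1 task column + 3 features + 1 target

The `reshape((-1, 1))` calls are what make the 1-D task and target vectors stackable as columns next to the 2-D feature matrix.
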