Code example #1
File: parameters.py  Project: everwind/DNNNLP
    def __init__(self, window_size=5, vocab_size=vocabulary.wordmap.len, embedding_size=100, hidden_size=10, seed=1):
        """
        Initialize L{Model} parameters.
        """
        self.vocab_size     = vocab_size
        self.window_size    = window_size
        self.embedding_size = embedding_size
        self.hidden_size    = hidden_size
        self.output_size    = 1
        # The input layer sees the concatenated embeddings of one window of words.
        self.input_size     = self.window_size * self.embedding_size

        import numpy

        from pylearn.algorithms.weights import random_weights
        # `shared` and `floatX` come from Theano at module level, e.g.
        # from theano import shared, config; floatX = config.floatX
        numpy.random.seed(seed)  # seed the RNG so the `seed` argument takes effect
        # Embeddings are initialized uniformly in [-1, 1), then length-normalized.
        self.embeddings = numpy.asarray((numpy.random.rand(self.vocab_size, embedding_size) - 0.5) * 2, dtype=floatX)
        self.normalize(range(self.vocab_size))
        self.hidden_weights = shared(numpy.asarray(random_weights(self.input_size, self.hidden_size, scale_by=1.0), dtype=floatX))
        self.output_weights = shared(numpy.asarray(random_weights(self.hidden_size, self.output_size, scale_by=1.0), dtype=floatX))
        self.hidden_biases  = shared(numpy.asarray(numpy.zeros((self.hidden_size,)), dtype=floatX))
        self.output_biases  = shared(numpy.asarray(numpy.zeros((self.output_size,)), dtype=floatX))
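The constructor calls self.normalize(...), which the snippet does not show. In embedding code of this style it usually rescales each embedding row to unit L2 norm; a minimal sketch under that assumption (the project's actual normalize() may differ):

    def normalize(self, indices):
        # Sketch only: rescale the given embedding rows to unit L2 norm.
        import numpy
        l2 = numpy.sqrt((self.embeddings[indices] ** 2).sum(axis=1))
        l2[l2 == 0] = 1.0  # leave all-zero rows untouched instead of dividing by zero
        self.embeddings[indices] /= l2[:, numpy.newaxis]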
Code example #2
    def __init__(self, window_size, vocab_size, embedding_size, hidden_size, seed, initial_embeddings, two_hidden_layers):
        """
        Initialize L{Model} parameters.
        """
        self.vocab_size     = vocab_size
        self.window_size    = window_size
        self.embedding_size = embedding_size
        self.two_hidden_layers = two_hidden_layers
        self.hidden_size    = hidden_size
        # The LBL (log-bilinear) variant predicts an embedding;
        # otherwise the model outputs a single score per window.
        self.output_size    = self.embedding_size if LBL else 1

        import copy
        import numpy
        from hyperparameters import HYPERPARAMETERS

        from pylearn.algorithms.weights import random_weights
        # `LBL`, `shared`, `floatX`, and the derived `self.input_size` are
        # defined at module/class level in the original parameters.py.
        numpy.random.seed(seed)
        if initial_embeddings is None:
            self.embeddings = numpy.asarray((numpy.random.rand(self.vocab_size, HYPERPARAMETERS["EMBEDDING_SIZE"]) - 0.5)*2 * HYPERPARAMETERS["INITIAL_EMBEDDING_RANGE"], dtype=floatX)
        else:
            assert initial_embeddings.shape == (self.vocab_size, HYPERPARAMETERS["EMBEDDING_SIZE"])
            self.embeddings = copy.copy(initial_embeddings)
        if HYPERPARAMETERS["NORMALIZE_EMBEDDINGS"]: self.normalize(range(self.vocab_size))
        if LBL:
            self.output_weights = shared(numpy.asarray(random_weights(self.input_size, self.output_size, scale_by=HYPERPARAMETERS["SCALE_INITIAL_WEIGHTS_BY"]), dtype=floatX))
            self.output_biases = shared(numpy.asarray(numpy.zeros((1, self.output_size)), dtype=floatX))
            self.score_biases = shared(numpy.asarray(numpy.zeros(self.vocab_size), dtype=floatX))
            assert not self.two_hidden_layers  # the LBL variant supports only one layer
        else:
            self.hidden_weights = shared(numpy.asarray(random_weights(self.input_size, self.hidden_size, scale_by=HYPERPARAMETERS["SCALE_INITIAL_WEIGHTS_BY"]), dtype=floatX))
            self.hidden_biases = shared(numpy.asarray(numpy.zeros((self.hidden_size,)), dtype=floatX))
            if self.two_hidden_layers:
                self.hidden2_weights = shared(numpy.asarray(random_weights(self.hidden_size, self.hidden_size, scale_by=HYPERPARAMETERS["SCALE_INITIAL_WEIGHTS_BY"]), dtype=floatX))
                self.hidden2_biases = shared(numpy.asarray(numpy.zeros((self.hidden_size,)), dtype=floatX))
            self.output_weights = shared(numpy.asarray(random_weights(self.hidden_size, self.output_size, scale_by=HYPERPARAMETERS["SCALE_INITIAL_WEIGHTS_BY"]), dtype=floatX))
            self.output_biases = shared(numpy.asarray(numpy.zeros((self.output_size,)), dtype=floatX))
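For orientation, here is a hypothetical construction of the non-LBL variant. The class name Parameters is inferred from the filename and all sizes are made up; the module-level names (LBL, shared, floatX) are assumed set up as noted above:

# Hypothetical usage, not from the original project:
params = Parameters(window_size=5,
                    vocab_size=10000,
                    embedding_size=100,
                    hidden_size=50,
                    seed=1,
                    initial_embeddings=None,  # draw fresh random embeddings
                    two_hidden_layers=False)  # plain one-hidden-layer MLP
# Every weight matrix is a Theano shared variable, e.g.
# params.hidden_weights.get_value().shape == (params.input_size, 50)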
Code example #3
File: train.py  Project: Sandy4321/parser-model
import math
from os.path import join
import cPickle
import random

import numpy as N  # `N` is the numpy alias used throughout train.py

# `HYPERPARAMETERS`, `featuremap`, and `labelmap` are defined earlier in train.py.
random.seed(HYPERPARAMETERS["random seed"])
N.random.seed(HYPERPARAMETERS["random seed"])

IDIM = featuremap.len                       # input feature dimensionality
ODIM = labelmap.len                         # number of output labels
HID = HYPERPARAMETERS["hidden dimensions"]
LR = HYPERPARAMETERS["learning rate"]
HLAYERS = HYPERPARAMETERS["hidden layers"]

from pylearn.algorithms.weights import random_weights
# Randomly initialized weights and zero biases for one or two hidden layers.
w1 = random_weights(IDIM, HID)
b1 = N.zeros(HID)
if HLAYERS == 2:
    wh = random_weights(HID, HID)
    bh = N.zeros(HID)
w2 = random_weights(HID, ODIM)
b2 = N.zeros(ODIM)

import graph

def abs_prehidden(prehidden, name="Prehidden"):
    # `name` was originally called `str`, which shadows the builtin.
    abs_prehidden = N.abs(prehidden)
    med = N.median(abs_prehidden)
    abs_prehidden = abs_prehidden.tolist()
    assert len(abs_prehidden) == 1  # expect a 1 x d row vector
    abs_prehidden = abs_prehidden[0]
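The shapes above describe a one- or two-hidden-layer MLP over the feature vectors. A minimal NumPy sketch of the forward pass they imply, assuming tanh hidden units and a softmax output (the real computation presumably lives in the graph module imported above):

def forward(x):
    # x: length-IDIM input vector; returns ODIM label probabilities. Sketch only.
    h = N.tanh(N.dot(x, w1) + b1)
    if HLAYERS == 2:
        h = N.tanh(N.dot(h, wh) + bh)
    scores = N.dot(h, w2) + b2
    e = N.exp(scores - scores.max())  # numerically stabilized softmax
    return e / e.sum()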
Code example #4
File: train.py  Project: Sandy4321/parser-model
# Aliases assumed from earlier in train.py: N = numpy, SS = scipy.sparse,
# TT = theano.tensor; MTYPE/TMTYPE pick the concrete and symbolic matrix types.
# Build a sparse 0/1 design matrix, one row per example.
xinstances = SS.lil_matrix((n, DIM))
for (i, ex) in enumerate(xvals):
    for j in ex:
        xinstances[i, j] = 1.
xinstances = MTYPE(xinstances)

targets = N.array(yvals)
targ1 = targets[0, :]  # first target row

# Symbolic inputs for the Theano graph.
xR = TMTYPE('x')
targR = TT.dvector("targ")

from pylearn.algorithms.weights import random_weights
# Randomly initialized weights, zero biases, for a one-hidden-layer net.
w1 = random_weights(DIM, HID)
print "w1", w1, w1.shape, w1.dtype
b1 = N.zeros(HID)
print "b1", b1, b1.shape, b1.dtype
w2 = random_weights(HID, 1)
print "w2", w2, w2.shape, w2.dtype
b2 = N.zeros(1)
print "b2", b2, b2.shape, b2.dtype

# Symbolic counterparts of the parameters.
w1R = TT.dmatrix('w1')
b1R = TT.dvector('b1')
w2R = TT.dmatrix('w2')
b2R = TT.dvector('b2')

import pylearn.algorithms.cost as cost
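The snippet ends right after importing pylearn's cost module, but the symbolic variables show where it is headed. One plausible continuation, sketched with plain Theano ops; this assumes xR is a dense matrix (if MTYPE is sparse, theano.sparse.structured_dot would replace TT.dot), and the real file likely uses a loss from pylearn.algorithms.cost rather than the squared error below:

# Hypothetical continuation, not from the original file:
import theano
prehidden = TT.dot(xR, w1R) + b1R
hidden = TT.tanh(prehidden)
output = TT.nnet.sigmoid(TT.dot(hidden, w2R) + b2R)  # scores in (0, 1)
loss = TT.sum((output.flatten() - targR) ** 2)       # simple squared error
loss_fn = theano.function([xR, targR, w1R, b1R, w2R, b2R], loss)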