# All snippets below assume Mariana's standard module aliases, as used in its README:
import Mariana.activations as MA
import Mariana.costs as MC
import Mariana.convolution as MCONV
import Mariana.decorators as MD
import Mariana.layers as ML
import Mariana.regularizations as MR
import Mariana.scenari as MS

def getModel(inpSize, filterWidth):
    ls = MS.GradientDescent(lr=0.5)
    cost = MC.NegativeLogLikelihood()
    pooler = MCONV.MaxPooling2D(1, 2)

    # Channel a flat input vector into a single 1 x inpSize map for the conv layers
    i = ML.Input(inpSize, name='inp')
    ichan = MCONV.InputChanneler(1, inpSize, name='inpChan')

    c1 = MCONV.Convolution2D(nbFilters=5, filterHeight=1, filterWidth=filterWidth, activation=MA.ReLU(), pooler=pooler, name="conv1")
    c2 = MCONV.Convolution2D(nbFilters=10, filterHeight=1, filterWidth=filterWidth, activation=MA.ReLU(), pooler=pooler, name="conv2")

    # Flatten makes the transition from feature maps to fully connected layers
    f = MCONV.Flatten(name="flat")
    h = ML.Hidden(5, activation=MA.ReLU(), decorators=[], regularizations=[], name="hid")
    o = ML.SoftmaxClassifier(2, decorators=[], learningScenario=ls, costObject=cost, name="out", regularizations=[])

    # The > operator chains layers into a model
    model = i > ichan > c1 > c2 > f > h > o
    return model
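# Minimal usage sketch for the model above. The data is random and purely
# illustrative, and the train call is an assumption modelled on the MLP
# example in Mariana's README (output layer name, one keyword argument per
# input layer, plus the targets); exact keyword names may differ between
# Mariana versions.
import numpy

model = getModel(inpSize=64, filterWidth=3)
X = numpy.random.randn(100, 64).astype('float32')    # 100 random 1D examples
y = numpy.random.randint(0, 2, 100).astype('int32')  # binary labels

miniBatchSize = 10
for start in range(0, len(X), miniBatchSize):
    model.train("out", inp=X[start:start + miniBatchSize], targets=y[start:start + miniBatchSize])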
def __init__(self, ls, cost):
    maxPool = MCONV.MaxPooling2D(2, 2)

    # A channeled 2D input: 3 channels of 256x256 (e.g. an RGB image).
    # An InputChanneler could instead arrange regular layers into channels:
    # ichan = MCONV.InputChanneler(256, 256, name='inpChan')
    i = MCONV.Input(nbChannels=3, height=256, width=256, name='inp')

    c1 = MCONV.Convolution2D(nbFilters=3, filterHeight=5, filterWidth=5, activation=MA.Tanh(), pooler=maxPool, name="conv1")
    c2 = MCONV.Convolution2D(nbFilters=3, filterHeight=5, filterWidth=5, activation=MA.Tanh(), pooler=maxPool, name="conv2")

    f = MCONV.Flatten(name="flat")
    h = ML.Hidden(5, activation=MA.Tanh(), decorators=[MD.GlorotTanhInit()], regularizations=[MR.L1(0), MR.L2(0.0001)], name="hid")
    o = ML.SoftmaxClassifier(2, decorators=[], learningScenario=ls, costObject=cost, name="out", regularizations=[])

    self.model = i > c1 > c2 > f > h > o
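# Note the two input styles used across these snippets: MCONV.Input (as above)
# declares a channeled 2D input directly, while ML.Input followed by
# MCONV.InputChanneler (see the surrounding snippets) wraps an existing flat
# layer into channels after the fact.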
def __init__(self, ls, cost):
    maxPool = MCONV.MaxPooling2D(2, 2)
    i = MCONV.Input(nbChannels=1, height=28, width=28, name='inp')

    c1 = MCONV.Convolution2D(nbFilters=20, filterHeight=5, filterWidth=5, activation=MA.Tanh(), pooler=maxPool, name="conv1")
    c2 = MCONV.Convolution2D(nbFilters=50, filterHeight=5, filterWidth=5, activation=MA.Tanh(), pooler=maxPool, name="conv2")

    # Needed for the transition to a fully connected layer
    f = MCONV.Flatten(name="flat")
    h = ML.Hidden(500, activation=MA.Tanh(), decorators=[], regularizations=[], name="hid")
    o = ML.SoftmaxClassifier(10, decorators=[], learningScenario=ls, costObject=cost, name="out", regularizations=[])

    self.model = i > c1 > c2 > f > h > o
    print(self.model)
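# Shape bookkeeping for the LeNet-style MNIST model above (assuming 'valid'
# convolutions, Theano's default, which Mariana wraps):
#   input: 1 x 28 x 28
#   conv1: 20 filters of 5x5 -> 20 x 24 x 24; 2x2 max-pool -> 20 x 12 x 12
#   conv2: 50 filters of 5x5 -> 50 x 8 x 8;   2x2 max-pool -> 50 x 4 x 4
#   flat:  50 * 4 * 4 = 800 units, feeding Hidden(500) -> SoftmaxClassifier(10)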
def __init__(self, ls, cost):
    maxPool = MCONV.MaxPooling2D(3, 3)
    i = MCONV.Input(nbChannels=1, height=100, width=100, name='inp')

    c1 = MCONV.Convolution2D(nbFilters=10, filterHeight=3, filterWidth=3, activation=MA.Max_norm(), pooler=maxPool, name="conv1")
    c3 = MCONV.Convolution2D(nbFilters=20, filterHeight=3, filterWidth=3, activation=MA.Max_norm(), pooler=maxPool, name="conv3")
    c2 = MCONV.Convolution2D(nbFilters=10, filterHeight=3, filterWidth=3, activation=MA.Max_norm(), pooler=maxPool, name="conv2")

    f = MCONV.Flatten(name="flat")
    h = ML.Hidden(2048, activation=MA.Max_norm(), decorators=[MD.BinomialDropout(0.7)], regularizations=[], name="hid")
    o = ML.SoftmaxClassifier(2, decorators=[], learningScenario=ls, costObject=cost, name="out", regularizations=[])

    # Layers run in the order they are chained (conv1 -> conv3 -> conv2),
    # not the order they are defined
    self.model = i > c1 > c3 > c2 > f > h > o
def __init__(self, inputSize, dictSize, patternSize, embSize, ls, cost):
    # pooler = MCONV.NoPooling()
    pooler = MCONV.MaxPooling2D(1, 2)

    # Embedding layer: maps integer indices from a dictSize vocabulary to
    # embSize-dimensional learned vectors
    emb = MCONV.Embedding(size=inputSize, nbDimentions=embSize, dictSize=dictSize, name='Emb')

    # patternSize // 2: integer division, so filterWidth stays an int
    c1 = MCONV.Convolution2D(nbFilters=1, filterHeight=1, filterWidth=patternSize // 2, activation=MA.ReLU(), pooler=pooler, name="conv1")
    c2 = MCONV.Convolution2D(nbFilters=4, filterHeight=1, filterWidth=patternSize // 2, activation=MA.ReLU(), pooler=MCONV.NoPooling(), name="conv2")

    f = MCONV.Flatten(name="flat")
    h = ML.Hidden(5, activation=MA.ReLU(), decorators=[], regularizations=[], name="hid")
    o = ML.SoftmaxClassifier(2, decorators=[], learningScenario=ls, costObject=cost, name="out", regularizations=[])

    self.model = emb > c1 > c2 > f > h > o
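# Rough shape sketch for the embedding model above (assumption, not from the
# original source: Mariana's Embedding presents its output as a 1-row feature
# map of width inputSize, one channel per embedding dimension, so the
# 1 x (patternSize//2) filters slide along the sequence):
#   emb   -> embSize x 1 x inputSize
#   conv1 -> 1 map, 1x2 max-pooled along the width
#   conv2 -> 4 maps, no pooling
#   flat  -> Hidden(5) -> SoftmaxClassifier(2)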
def __init__(self, inputSize, patternSize, ls, cost):
    # pooler = MCONV.NoPooling()
    pooler = MCONV.MaxPooling2D(1, 2)

    # The input channeler takes a regular flat layer and arranges it into
    # a single 1 x inputSize channel for the conv layers
    i = ML.Input(inputSize, name='inp')
    ichan = MCONV.InputChanneler(1, inputSize, name='inpChan')

    c1 = MCONV.Convolution2D(nbFilters=5, filterHeight=1, filterWidth=patternSize // 2, activation=MA.ReLU(), pooler=pooler, name="conv1")
    c2 = MCONV.Convolution2D(nbFilters=10, filterHeight=1, filterWidth=patternSize // 2, activation=MA.ReLU(), pooler=MCONV.NoPooling(), name="conv2")

    f = MCONV.Flatten(name="flat")
    h = ML.Hidden(5, activation=MA.ReLU(), decorators=[], regularizations=[], name="hid")
    o = ML.SoftmaxClassifier(2, decorators=[], learningScenario=ls, costObject=cost, name="out", regularizations=[])

    self.model = i > ichan > c1 > c2 > f > h > o
import pickle
import numpy

def load_data(picklefile):
    tbl = pickle.load(open(picklefile, "rb"))
    return tbl

def center(batch):
    # Standardize a batch to zero mean and unit variance
    mean = numpy.mean(batch)
    sd = numpy.std(batch)
    return (batch - mean) / sd

def classes(targets):
    # Collapse one-hot target rows to class indices (with 0 and 1 swapped)
    x = [numpy.abs(numpy.argmax(i) - 1) for i in targets[0]]
    return x

maxPool = MCONV.MaxPooling2D(2, 2)
ls = MS.MomentumGradientDescent(lr=1e-1, momentum=0.95)
cost = MC.NegativeLogLikelihood()

miniBatchSize = 100
trainfile = "../flip_grey_train_dataset.p"
validfile = "../flip_grey_valid_dataset.p"
testfile = "../test_set.p"
runprefix = "HD2"

i = MCONV.Input(nbChannels=1, height=100, width=100, name='inp')
c1 = MCONV.Convolution2D(
    nbFilters=15,
    filterHeight=3,
    filterWidth=3,