Example #1
 def __init__(self, backend, dataset, subj):
     ad = {
         'type': 'adadelta',
         'lr_params': {
             'rho': 0.9,
             'epsilon': 1e-9
         }
     }
     self.layers = []
     self.add(
         DataLayer(is_local=True,
                   nofm=dataset.nchannels,
                   ofmshape=[1, dataset.nsamples]))
     self.add(
         ConvLayer(nofm=64,
                   fshape=[1, 3],
                   activation=RectLin(),
                   lrule_init=ad))
     self.add(PoolingLayer(op='max', fshape=[1, 2], stride=2))
     if subj != 2:
         self.add(FCLayer(nout=128, activation=RectLin(), lrule_init=ad))
     self.add(
         FCLayer(nout=dataset.nclasses,
                 activation=Logistic(),
                 lrule_init=ad))
     self.add(CostLayer(cost=CrossEntropy()))
     self.model = MLP(num_epochs=1, batch_size=128, layers=self.layers)
     self.dataset = dataset
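
The ad dictionary above only selects the Adadelta learning rule and its two hyperparameters for the trainable layers. As a rough, standalone illustration of what rho and epsilon control, here is a NumPy sketch of the Adadelta update rule (Zeiler, 2012); it is not taken from neon, and the toy objective at the bottom is made up for the example.

import numpy as np

def adadelta_step(param, grad, state, rho=0.9, epsilon=1e-9):
    # Running average of squared gradients, decayed by rho.
    state['eg2'] = rho * state['eg2'] + (1 - rho) * grad ** 2
    # Step size is the ratio of the two running RMS values;
    # epsilon keeps both square roots away from zero.
    delta = -np.sqrt(state['edx2'] + epsilon) / np.sqrt(state['eg2'] + epsilon) * grad
    # Running average of squared updates.
    state['edx2'] = rho * state['edx2'] + (1 - rho) * delta ** 2
    return param + delta, state

# Toy usage on a single scalar parameter; values are illustrative only.
w, state = 5.0, {'eg2': 0.0, 'edx2': 0.0}
for _ in range(100):
    g = 2.0 * w                      # gradient of f(w) = w**2
    w, state = adadelta_step(w, g, state)
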
Example #2
def create_model(nin):
    layers = []
    layers.append(DataLayer(nout=nin))
    layers.append(FCLayer(nout=100, activation=RectLin()))
    layers.append(FCLayer(nout=10, activation=Logistic()))
    layers.append(CostLayer(cost=CrossEntropy()))
    model = MLP(num_epochs=10, batch_size=128, layers=layers)
    return model
 def __init__(self, backend, dataset):
     layers = []
     layers.append(DataLayer(nout=784))
     layers.append(FCLayer(nout=1000, activation=RectLin()))
     layers.append(FCLayer(nout=10, activation=Logistic()))
     layers.append(CostLayer(cost=CrossEntropy()))
     self.model = MLP(num_epochs=10, batch_size=100, layers=layers)
     self.dataset = dataset
class ConvNet(object):
    """
    The network definition.
    """
    def __init__(self, backend, dataset, subj):
        ad = {
            'type': 'adadelta',
            'lr_params': {
                'rho': 0.9,
                'epsilon': 1e-9
            }
        }
        self.layers = []
        self.add(
            DataLayer(is_local=True,
                      nofm=dataset.nchannels,
                      ofmshape=[1, dataset.nsamples]))
        self.add(DropOutLayer(keep=0.95))
        self.add(
            ConvLayer(nofm=64,
                      fshape=[1, 3],
                      activation=RectLin(),
                      lrule_init=ad))
        self.add(DropOutLayer(keep=0.8))
        self.add(PoolingLayer(op='max', fshape=[1, 2], stride=2))
        # Addition of a Dropout layer to prevent overfitting
        self.add(DropOutLayer(keep=0.6))
        self.add(FCLayer(nout=128, activation=RectLin(), lrule_init=ad))
        self.add(
            FCLayer(nout=dataset.nclasses,
                    activation=Logistic(),
                    lrule_init=ad))
        self.add(CostLayer(cost=CrossEntropy()))
        self.model = MLP(num_epochs=1, batch_size=128, layers=self.layers)
        self.backend = backend
        self.dataset = dataset

    def add(self, layer):
        self.layers.append(layer)

    def fit(self):
        Fit(model=self.model, backend=self.backend, dataset=self.dataset).run()
        return self

    def predict(self):
        ds = self.dataset
        outputs, targets = self.model.predict_fullset(self.dataset, 'test')
        predshape = (ds.inputs['test'].shape[0], ds.nclasses)
        preds = np.zeros(predshape, dtype=np.float32)
        labs = np.zeros_like(preds)
        # The network returns fewer outputs than the number of predictions
        # to be made; the missing leading predictions are left as zeros.
        start = ds.winsize - 1
        end = start + outputs.shape[1]
        preds[start:end] = outputs.asnumpyarray().T
        labs[start:end] = targets.asnumpyarray().T
        return labs, preds, ds.testinds
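
The comment in predict() above notes that the network produces fewer outputs than there are test rows, presumably because each prediction consumes a window of winsize samples, so the first winsize - 1 rows are left as zeros. The following standalone NumPy sketch only illustrates that offset bookkeeping; the sizes are made up and stand in for the dataset's real shapes.

import numpy as np

# Hypothetical sizes, chosen only to illustrate the offset logic.
n_test, nclasses, winsize = 10, 2, 4
# Stand-in for the model output: one column per predicted window.
outputs = np.random.rand(nclasses, n_test - winsize + 1)

preds = np.zeros((n_test, nclasses), dtype=np.float32)
start = winsize - 1                  # the first winsize - 1 rows stay zero
end = start + outputs.shape[1]
preds[start:end] = outputs.T         # model output is (classes, windows), hence the transpose
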
Example #5
 def __init__(self, backend, dataset, subj):
     ad = {"type": "adadelta", "lr_params": {"rho": 0.9, "epsilon": 1e-10}}
     self.layers = []
     self.add(DataLayer(is_local=True, nofm=dataset.nchannels, ofmshape=[1, dataset.nsamples]))
     self.add(ConvLayer(nofm=64, fshape=[1, 3], activation=RectLin(), lrule_init=ad))
     self.add(PoolingLayer(op="max", fshape=[1, 2], stride=2))
     # self.add(DropOutLayer(keep=0.5))
     self.add(FCLayer(nout=128, activation=RectLin(), lrule_init=ad))
     self.add(FCLayer(nout=dataset.nclasses, activation=Logistic(), lrule_init=ad))
     self.add(CostLayer(cost=CrossEntropy()))
     self.model = MLP(num_epochs=3, batch_size=128, layers=self.layers)
     self.backend = backend
     self.dataset = dataset
class ConvNet(object):
    """
    The network definition.
    """
    def __init__(self, backend, dataset, subj):
        ad = {
            'type': 'adadelta',
            'lr_params': {'rho': 0.9, 'epsilon': 1e-9}
        }
        self.layers = []
        self.add(DataLayer(is_local=True, nofm=dataset.nchannels,
                           ofmshape=[1, dataset.nsamples]))
        self.add(DropOutLayer(keep=0.95))
        self.add(ConvLayer(nofm=64, fshape=[1, 3],
                           activation=RectLin(), lrule_init=ad))
        self.add(DropOutLayer(keep=0.8))
        self.add(PoolingLayer(op='max', fshape=[1, 2], stride=2))
        # Addition of a Dropout layer to prevent overfitting
        self.add(DropOutLayer(keep=0.6))
        self.add(FCLayer(nout=128, activation=RectLin(), lrule_init=ad))
        self.add(FCLayer(nout=dataset.nclasses, activation=Logistic(),
                         lrule_init=ad))
        self.add(CostLayer(cost=CrossEntropy()))
        self.model = MLP(num_epochs=1, batch_size=128, layers=self.layers)
        self.backend = backend
        self.dataset = dataset

    def add(self, layer):
        self.layers.append(layer)

    def fit(self):
        Fit(model=self.model, backend=self.backend, dataset=self.dataset).run()
        return self

    def predict(self):
        ds = self.dataset
        outputs, targets = self.model.predict_fullset(self.dataset, 'test')
        predshape = (ds.inputs['test'].shape[0], ds.nclasses)
        preds = np.zeros(predshape, dtype=np.float32)
        labs = np.zeros_like(preds)
        # The network returns fewer outputs than the number of predictions
        # to be made; the missing leading predictions are left as zeros.
        start = ds.winsize - 1
        end = start + outputs.shape[1]
        preds[start:end] = outputs.asnumpyarray().T
        labs[start:end] = targets.asnumpyarray().T
        return labs, preds, ds.testinds
Example #7
 def __init__(self, backend, dataset, subj):
     ad = {
         'type': 'adadelta',
         'lr_params': {'rho': 0.9, 'epsilon': 1e-10}
     }
     self.layers = []
     self.add(DataLayer(is_local=True, nofm=dataset.nchannels,
                        ofmshape=[1, dataset.nsamples]))
     self.add(ConvLayer(nofm=64, fshape=[1, 3],
                        activation=RectLin(), lrule_init=ad))
     self.add(PoolingLayer(op='max', fshape=[1, 2], stride=2))
     self.add(FCLayer(nout=128, activation=RectLin(), lrule_init=ad))
     self.add(FCLayer(nout=dataset.nclasses, activation=Logistic(),
                      lrule_init=ad))
     self.add(CostLayer(cost=CrossEntropy()))
     self.model = MLP(num_epochs=1, batch_size=128, layers=self.layers)
     self.backend = backend
     self.dataset = dataset
class Network(object):
    def __init__(self, backend, dataset):
        layers = []
        layers.append(DataLayer(nout=784))
        layers.append(FCLayer(nout=1000, activation=RectLin()))
        layers.append(FCLayer(nout=10, activation=Logistic()))
        layers.append(CostLayer(cost=CrossEntropy()))
        self.model = MLP(num_epochs=10, batch_size=100, layers=layers)
        self.backend = backend
        self.dataset = dataset

    def fit(self):
        self.experiment = FitExperiment(model=self.model, backend=self.backend,
                                        dataset=self.dataset)
        self.experiment.run()
        
    def predict(self):
        outputs, targets = self.model.predict_fullset(self.dataset, 'test')
        preds = np.argmax(outputs.asnumpyarray().T, axis=1)
        return preds
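
For completeness, a small NumPy-only sketch of the final argmax step in Network.predict(). The array below is a synthetic stand-in for outputs.asnumpyarray(), which (as in the ConvNet.predict above) holds one row per class and one column per test sample; the transpose puts samples on the rows before taking the argmax over classes.

import numpy as np

# Synthetic stand-in for outputs.asnumpyarray(): 10 classes (matching the
# nout=10 output layer above), 5 test samples.
outputs = np.random.rand(10, 5)

preds = np.argmax(outputs.T, axis=1)   # one predicted class index per sample
print(preds.shape)                     # (5,)
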