Example #1
    def __init__(self, config):
        ModelBase.__init__(self)

        self.config = config
        self.verbose = self.config['verbose']
        self.build_model()
        # count params
        if self.verbose: self.count_params()
Example #2
    def __init__(self, config):
        ModelBase.__init__(self)

        self.config = config
        self.verbose = self.config['verbose']
        self.build_model()
        # count params
        if self.verbose: self.count_params()
Example #3
class TestJustRun(unittest.TestCase):
    """ This is not a real test case... it's just a couple of model queries that should go through
     without raising any exception """

    def setUp(self):
        self.mb = ModelBase(name="my mb", model_dir='test_models')

    def test_A(self):
        self.assertTrue('cg_crabs' in self.mb.list_models(), 'this test suite requires the cg_crabs model')

        # crabs has columns: 'species', 'sex', 'FL', 'RW', 'CL', 'CW', 'BD'
        # now run a few queries
        result = self.mb.execute('{"SELECT": ["sex", "FL"], "FROM": "cg_crabs"}')
Example #4
	def add_notice(self, data):
		notice = {
					"content": data["content"],
					"publisher": ModelBase.get_oid(data["publisher"]),
					"create_date": datetime.utcnow(),
					"invalid_date": datetime.utcnow() + timedelta(days = int(data["life"]))
				}	
		return str(self.collection.insert(notice))
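A minimal usage sketch of add_notice, for context. The instance name notices and the id string are hypothetical; the field names and the int() cast on life follow the snippet above:

notices = NoticeModel()  # assumed wrapper class exposing add_notice()
new_id = notices.add_notice({
    "content": "Scheduled maintenance tonight",
    "publisher": "5f2b9c0e1c4ae33a1e000001",  # id string accepted by ModelBase.get_oid()
    "life": "7",                              # days until invalid_date
})
print(new_id)  # the stringified ObjectId returned by insert()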
Example #5
    def __init__(self, dataset, classfeatureindex = -1, alpha = 0.2, maxiter = 50, *args, **kwargs):
        ModelBase.__init__(self, dataset, 'LOGISTIC', *args, **kwargs)
        self.classfeatureindex = classfeatureindex #index of the column which defines the feature in dataset
        self.Test = self.Classify2
        self.Apply = self.ClassifyDataset
        self.Train = self.Regress
        self.Save = self.DumpLogistic
        self.Load = self.LoadLogistic
        self.Graph = self.ShowImage
        self.Positive = 1
        self.Negative = -1
        # use default
        # self.T = self.RealValue
        self.tree = {}

        self.sigmoid = lambda input_n:np.vectorize(lambda n: 1.0/(1.0+math.e**(-n)))(input_n)
        self.alpha = alpha
        self.maxiter = maxiter
        self.weights = np.ones((len(self.dataset.head),1))
        self.classfeatureindex = self.dataset.classfeatureindex
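One note on the sigmoid lambda above: wrapping it in np.vectorize adds per-element Python-call overhead that NumPy does not need, because np.exp already broadcasts elementwise over arrays. An equivalent formulation, shown only as a sketch:

import numpy as np

def sigmoid(input_n):
    # same values as the vectorized lambda above, computed natively by NumPy
    return 1.0 / (1.0 + np.exp(-input_n))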
Example #6
    def __init__(self):
        ModelBase.__init__(self)
        self.model = LogisticRegression()
Example #7
 
    def queryIndustrys(self, sIndustry = ''):

        '''
             query industry by industry_name

        '''

        sSql = "select * from %s where industry_name = '%s'" % (self.table, sIndustry)
        lRet = super(TIndustrys,self).query(sSql)
        return lRet

    def updateIndustrys(self, sIndustry = '', sDeterminer = '', sFdeter = ''):

        '''
             update industry by industrysname

        '''

        sSql = "update %s set determiner = '%s', fdeterminer = '%s' where industry_name = '%s'" % (self.table, sDeterminer, sFdeter, sIndustry)
        print(sSql)
        bRet = super(TIndustrys,self).operate(sSql)
        return bRet

    def deleteIndustrys(self, sIndustry = '', sDeterminer = '', sFdeter = ''):
        pass        

#print TIndustrys().addIndustrys('母婴', '奶粉', '家族')
#print TIndustrys().updateIndustrys(sIndustry = '母婴', sDeterminer = '奶粉#婴儿')
lRes = ModelBase().query('select * from industrys')
#for record in lRes:
#    print record['determiner'], "\t", record['fdeterminer']
#print ModelBase().operate("insert into industrys(industrys_name, positive_limit, reverse_limit, is_noise, remain2, remain1) values('b','a','a','a','a',1)")
#print ModelBase().operate("update industrys set industrys_name='c' where industrys_name='b'")
#if len(self.queryIndustrys(sIndustry)) == 0:
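A general caution on this example: building SQL by string interpolation, as queryIndustrys and updateIndustrys do, is open to SQL injection. A sketch of the parameterized alternative, assuming the underlying driver follows the DB-API %s paramstyle (as MySQLdb does); the table name itself cannot be parameterized:

# hypothetical rewrite of the update; values are passed separately, never interpolated
sSql = "update " + self.table + " set determiner = %s, fdeterminer = %s where industry_name = %s"
cursor.execute(sSql, (sDeterminer, sFdeter, sIndustry))  # the driver escapes the values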
Example #8
    def __init__(self):
        ModelBase.__init__(self)
        self.model = MultinomialNB()
Example #9
    def __init__(self, config):
        ModelBase.__init__(self)

        self.config = config
        self.verbose = self.config['verbose']

        self.name = 'customized'

        # input shape in c01b
        self.channels = 3  # 'c' mean(R,G,B) = (103.939, 116.779, 123.68)
        self.input_width = self.config[
            'input_width']  # '0' single scale training 224
        self.input_height = self.config[
            'input_height']  # '1' single scale training 224
        self.batch_size = self.config['batch_size']  # 'b'

        # output dimension
        self.n_softmax_out = self.config['n_softmax_out']

        # training related
        self.base_lr = np.float32(self.config['learning_rate'])
        self.shared_lr = theano.shared(self.base_lr)
        self.step_idx = 0
        self.mu = self.config['momentum']  # def: 0.9 # momentum
        self.eta = self.config['weight_decay']  #0.0002 # weight decay

        self.shared_x = theano.shared(np.zeros(
            (3, self.input_width, self.input_height,
             self.config['file_batch_size']),
            dtype=theano.config.floatX),
                                      borrow=True)

        self.shared_y = theano.shared(np.zeros(
            (self.config['file_batch_size'], ), dtype=int),
                                      borrow=True)

        self.build_model()

        self.output = self.output_layer.output

        self.layers = get_layers(lastlayer=self.output_layer)

        self.layers = [layer for layer in self.layers \
            if layer.name not in ['LRN\t','Pool\t','Flatten\t','Dropout'+ str(0.5)]]

        self.params, self.weight_types = get_params(self.layers)
        # if multi-stream layers exist, redefine and abstract into one layer class in layers2.py

        self.count_params()

        # shared variable for storing momentum before exchanging momentum(delta w)
        self.vels = [
            theano.shared(param_i.get_value() * 0.) for param_i in self.params
        ]

        # shared variable for accepting momentum during exchanging momentum(delta w)
        self.vels2 = [
            theano.shared(param_i.get_value() * 0.) for param_i in self.params
        ]

        self.train = None
        self.val = None
        self.inference = None
        self.get_vel = None
        self.descent_vel = None
Example #10
	def get_one(self, user_id):
		result = self.collection.find_one(ModelBase.get_oid(user_id))
		return ModelBase.transform_id(result)
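A short sketch of the round trip this helper implies, assuming get_oid parses the id string into a bson ObjectId and transform_id replaces the raw _id with a JSON-friendly string; the instance and id below are hypothetical:

user = users.get_one("5f2b9c0e1c4ae33a1e000001")
# find_one() returned {'_id': ObjectId('5f2b...'), ...};
# after transform_id() the caller sees a plain string id instead of an ObjectId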
Example #11
    def __init__(self, config):
        ModelBase.__init__(self)

        self.config = config
        self.verbose = self.config['verbose']
        self.name = 'alexnet'
        batch_size = config['batch_size']
        flag_datalayer = config['use_data_layer']
        lib_conv = config['lib_conv']
        n_softmax_out=config['n_softmax_out']
        # ##################### BUILD NETWORK ##########################
        # allocate symbolic variables for the data
        # 'rand' is a random array used for random cropping/mirroring of data
        x = T.ftensor4('x')
        y = T.lvector('y')
        rand = T.fvector('rand')
        lr = T.scalar('lr')

        if self.verbose: print('AlexNet 2/16')
        self.layers = []
        params = []
        weight_types = []

        if flag_datalayer:
            data_layer = DataLayer(input=x, image_shape=(3, 256, 256,
                                                         batch_size),
                                   cropsize=227, rand=rand, mirror=True,
                                   flag_rand=config['rand_crop'])

            layer1_input = data_layer.output
        else:
            layer1_input = x

        convpool_layer1 = ConvPoolLayer(input=layer1_input,
                                        image_shape=(3, 227, 227, batch_size),
                                        filter_shape=(3, 11, 11, 96),
                                        convstride=4, padsize=0, group=1,
                                        poolsize=3, poolstride=2,
                                        bias_init=0.0, lrn=True,
                                        lib_conv=lib_conv,
                                        verbose = self.verbose
                                        )
        self.layers.append(convpool_layer1)
        params += convpool_layer1.params
        weight_types += convpool_layer1.weight_type

        convpool_layer2 = ConvPoolLayer(input=convpool_layer1.output,
                                        image_shape=(96, 27, 27, batch_size),
                                        filter_shape=(96, 5, 5, 256),
                                        convstride=1, padsize=2, group=2,
                                        poolsize=3, poolstride=2,
                                        bias_init=0.1, lrn=True,
                                        lib_conv=lib_conv,
                                        verbose = self.verbose
                                        )
        self.layers.append(convpool_layer2)
        params += convpool_layer2.params
        weight_types += convpool_layer2.weight_type

        convpool_layer3 = ConvPoolLayer(input=convpool_layer2.output,
                                        image_shape=(256, 13, 13, batch_size),
                                        filter_shape=(256, 3, 3, 384),
                                        convstride=1, padsize=1, group=1,
                                        poolsize=1, poolstride=0,
                                        bias_init=0.0, lrn=False,
                                        lib_conv=lib_conv,
                                        verbose = self.verbose
                                        )
        self.layers.append(convpool_layer3)
        params += convpool_layer3.params
        weight_types += convpool_layer3.weight_type

        convpool_layer4 = ConvPoolLayer(input=convpool_layer3.output,
                                        image_shape=(384, 13, 13, batch_size),
                                        filter_shape=(384, 3, 3, 384),
                                        convstride=1, padsize=1, group=2,
                                        poolsize=1, poolstride=0,
                                        bias_init=0.1, lrn=False,
                                        lib_conv=lib_conv,
                                        verbose = self.verbose
                                        )
        self.layers.append(convpool_layer4)
        params += convpool_layer4.params
        weight_types += convpool_layer4.weight_type

        convpool_layer5 = ConvPoolLayer(input=convpool_layer4.output,
                                        image_shape=(384, 13, 13, batch_size),
                                        filter_shape=(384, 3, 3, 256),
                                        convstride=1, padsize=1, group=2,
                                        poolsize=3, poolstride=2,
                                        bias_init=0.0, lrn=False,
                                        lib_conv=lib_conv,
                                        verbose = self.verbose
                                        )
        self.layers.append(convpool_layer5)
        params += convpool_layer5.params
        weight_types += convpool_layer5.weight_type

        fc_layer6_input = T.flatten(
            convpool_layer5.output.dimshuffle(3, 0, 1, 2), 2)
        fc_layer6 = FCLayer(input=fc_layer6_input, 
                            n_in=9216,
                            n_out=4096,
                            verbose = self.verbose
                            )
        self.layers.append(fc_layer6)
        params += fc_layer6.params
        weight_types += fc_layer6.weight_type

        dropout_layer6 = DropoutLayer(fc_layer6.output, 
                                      n_in=4096, 
                                      n_out=4096, 
                                      verbose = self.verbose)

        fc_layer7 = FCLayer(input=dropout_layer6.output, 
                            n_in=4096, 
                            n_out=4096,
                            verbose = self.verbose
                            )
        self.layers.append(fc_layer7)
        params += fc_layer7.params
        weight_types += fc_layer7.weight_type

        dropout_layer7 = DropoutLayer(fc_layer7.output, 
                                      n_in=4096, 
                                      n_out=4096,
                                      verbose = self.verbose)

        softmax_layer8 = SoftmaxLayer(input=dropout_layer7.output, 
                                      n_in=4096, 
                                      n_out=n_softmax_out,
                                      verbose = self.verbose)
        self.layers.append(softmax_layer8)
        params += softmax_layer8.params
        weight_types += softmax_layer8.weight_type

        # #################### NETWORK BUILT #######################
        self.p_y_given_x = softmax_layer8.p_y_given_x
        self.y_pred = softmax_layer8.y_pred
        
        
        self.cost = softmax_layer8.negative_log_likelihood(y)
        self.errors = softmax_layer8.errors(y)
        if n_softmax_out < 5:        
            self.errors_top_5 = softmax_layer8.errors_top_x(y, n_softmax_out)
        else:        
            self.errors_top_5 = softmax_layer8.errors_top_x(y, 5)       
        self.params = params
        
        # inputs
        self.x = x
        self.y = y
        self.rand = rand
        self.lr = lr
        self.shared_x = theano.shared(np.zeros((3, config['input_width'], 
                                                  config['input_height'], 
                                                  config['file_batch_size']), # for loading large batch
                                                  dtype=theano.config.floatX),  
                                                  borrow=True)
                                              
        self.shared_y = theano.shared(np.zeros((config['file_batch_size'],), 
                                          dtype=int),   borrow=True)
        self.shared_lr = theano.shared(np.float32(config['learning_rate']))
        
        # training related
        self.base_lr = np.float32(config['learning_rate'])
        self.step_idx = 0
        self.mu = config['momentum'] # def: 0.9 # momentum
        self.eta = config['weight_decay'] #0.0002 # weight decay
        self.weight_types = weight_types
        self.batch_size = batch_size

                                          
        self.grads = T.grad(self.cost,self.params)
        
        # shared variable for storing momentum before exchanging momentum(delta w)
        self.vels = [theano.shared(param_i.get_value() * 0.)
            for param_i in self.params]
        
        # shared variable for accepting momentum during exchanging momentum(delta w)
        self.vels2 = [theano.shared(param_i.get_value() * 0.)
            for param_i in self.params]
            
        self.train = None
        self.get_vel = None
        self.descent_vel = None
        self.val = None
        self.inference = None
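The vels buffers hold one velocity per parameter, which points at the classic momentum update with weight decay. The compiled train function is not shown here, but under that assumption the updates it assembles would look roughly like this sketch (lr, self.mu and self.eta as defined above):

updates = []
for param, grad, vel in zip(self.params, self.grads, self.vels):
    new_vel = self.mu * vel - lr * (grad + self.eta * param)  # momentum + weight decay
    updates.append((vel, new_vel))
    updates.append((param, param + new_vel))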
Example #12
    def __init__(self, config):
        ModelBase.__init__(self)

        self.config = config
        self.verbose = self.config['verbose']
        self.name = 'alexnet'
        batch_size = config['batch_size']
        flag_datalayer = config['use_data_layer']
        lib_conv = config['lib_conv']
        n_softmax_out = config['n_softmax_out']
        # ##################### BUILD NETWORK ##########################
        # allocate symbolic variables for the data
        # 'rand' is a random array used for random cropping/mirroring of data
        x = T.ftensor4('x')
        y = T.lvector('y')
        rand = T.fvector('rand')
        lr = T.scalar('lr')

        if self.verbose: print('AlexNet 2/16')
        self.layers = []
        params = []
        weight_types = []

        if flag_datalayer:
            data_layer = DataLayer(input=x,
                                   image_shape=(3, 256, 256, batch_size),
                                   cropsize=227,
                                   rand=rand,
                                   mirror=True,
                                   flag_rand=config['rand_crop'])

            layer1_input = data_layer.output
        else:
            layer1_input = x

        convpool_layer1 = ConvPoolLayer(input=layer1_input,
                                        image_shape=(3, 227, 227, batch_size),
                                        filter_shape=(3, 11, 11, 96),
                                        convstride=4,
                                        padsize=0,
                                        group=1,
                                        poolsize=3,
                                        poolstride=2,
                                        bias_init=0.0,
                                        lrn=True,
                                        lib_conv=lib_conv,
                                        verbose=self.verbose)
        self.layers.append(convpool_layer1)
        params += convpool_layer1.params
        weight_types += convpool_layer1.weight_type

        convpool_layer2 = ConvPoolLayer(input=convpool_layer1.output,
                                        image_shape=(96, 27, 27, batch_size),
                                        filter_shape=(96, 5, 5, 256),
                                        convstride=1,
                                        padsize=2,
                                        group=2,
                                        poolsize=3,
                                        poolstride=2,
                                        bias_init=0.1,
                                        lrn=True,
                                        lib_conv=lib_conv,
                                        verbose=self.verbose)
        self.layers.append(convpool_layer2)
        params += convpool_layer2.params
        weight_types += convpool_layer2.weight_type

        convpool_layer3 = ConvPoolLayer(input=convpool_layer2.output,
                                        image_shape=(256, 13, 13, batch_size),
                                        filter_shape=(256, 3, 3, 384),
                                        convstride=1,
                                        padsize=1,
                                        group=1,
                                        poolsize=1,
                                        poolstride=0,
                                        bias_init=0.0,
                                        lrn=False,
                                        lib_conv=lib_conv,
                                        verbose=self.verbose)
        self.layers.append(convpool_layer3)
        params += convpool_layer3.params
        weight_types += convpool_layer3.weight_type

        convpool_layer4 = ConvPoolLayer(input=convpool_layer3.output,
                                        image_shape=(384, 13, 13, batch_size),
                                        filter_shape=(384, 3, 3, 384),
                                        convstride=1,
                                        padsize=1,
                                        group=2,
                                        poolsize=1,
                                        poolstride=0,
                                        bias_init=0.1,
                                        lrn=False,
                                        lib_conv=lib_conv,
                                        verbose=self.verbose)
        self.layers.append(convpool_layer4)
        params += convpool_layer4.params
        weight_types += convpool_layer4.weight_type

        convpool_layer5 = ConvPoolLayer(input=convpool_layer4.output,
                                        image_shape=(384, 13, 13, batch_size),
                                        filter_shape=(384, 3, 3, 256),
                                        convstride=1,
                                        padsize=1,
                                        group=2,
                                        poolsize=3,
                                        poolstride=2,
                                        bias_init=0.0,
                                        lrn=False,
                                        lib_conv=lib_conv,
                                        verbose=self.verbose)
        self.layers.append(convpool_layer5)
        params += convpool_layer5.params
        weight_types += convpool_layer5.weight_type

        fc_layer6_input = T.flatten(
            convpool_layer5.output.dimshuffle(3, 0, 1, 2), 2)
        fc_layer6 = FCLayer(input=fc_layer6_input,
                            n_in=9216,
                            n_out=4096,
                            verbose=self.verbose)
        self.layers.append(fc_layer6)
        params += fc_layer6.params
        weight_types += fc_layer6.weight_type

        dropout_layer6 = DropoutLayer(fc_layer6.output,
                                      n_in=4096,
                                      n_out=4096,
                                      verbose=self.verbose)

        fc_layer7 = FCLayer(input=dropout_layer6.output,
                            n_in=4096,
                            n_out=4096,
                            verbose=self.verbose)
        self.layers.append(fc_layer7)
        params += fc_layer7.params
        weight_types += fc_layer7.weight_type

        dropout_layer7 = DropoutLayer(fc_layer7.output,
                                      n_in=4096,
                                      n_out=4096,
                                      verbose=self.verbose)

        softmax_layer8 = SoftmaxLayer(input=dropout_layer7.output,
                                      n_in=4096,
                                      n_out=n_softmax_out,
                                      verbose=self.verbose)
        self.layers.append(softmax_layer8)
        params += softmax_layer8.params
        weight_types += softmax_layer8.weight_type

        # #################### NETWORK BUILT #######################
        self.p_y_given_x = softmax_layer8.p_y_given_x
        self.y_pred = softmax_layer8.y_pred

        self.output = self.p_y_given_x

        self.cost = softmax_layer8.negative_log_likelihood(y)
        self.error = softmax_layer8.errors(y)
        if n_softmax_out < 5:
            self.error_top_5 = softmax_layer8.errors_top_x(y, n_softmax_out)
        else:
            self.error_top_5 = softmax_layer8.errors_top_x(y, 5)
        self.params = params

        # inputs
        self.x = x
        self.y = y
        self.rand = rand
        self.lr = lr
        self.shared_x = theano.shared(
            np.zeros(
                (3, config['input_width'], config['input_height'],
                 config['file_batch_size']),  # for loading large batch
                dtype=theano.config.floatX),
            borrow=True)

        self.shared_y = theano.shared(np.zeros((config['file_batch_size'], ),
                                               dtype=int),
                                      borrow=True)
        self.shared_lr = theano.shared(np.float32(config['learning_rate']))

        # training related
        self.base_lr = np.float32(config['learning_rate'])
        self.step_idx = 0
        self.mu = config['momentum']  # def: 0.9 # momentum
        self.eta = config['weight_decay']  #0.0002 # weight decay
        self.weight_types = weight_types
        self.batch_size = batch_size

        self.grads = T.grad(self.cost, self.params)

        subb_ind = T.iscalar('subb')  # sub batch index
        #print self.shared_x[:,:,:,subb_ind*self.batch_size:(subb_ind+1)*self.batch_size].shape.eval()
        self.subb_ind = subb_ind
        self.shared_x_slice = self.shared_x[:, :, :, subb_ind *
                                            self.batch_size:(subb_ind + 1) *
                                            self.batch_size]
        self.shared_y_slice = self.shared_y[subb_ind *
                                            self.batch_size:(subb_ind + 1) *
                                            self.batch_size]
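The shared_x_slice and shared_y_slice views are indexed by the symbolic subb_ind, so a compiled function can step through one sub-batch of the large file batch per call while the data stays on the GPU. A sketch of the usual wiring, assuming momentum_updates was built from self.grads (e.g. with the momentum rule sketched after Example #11) and reads the learning rate from self.shared_lr:

train_fn = theano.function(
    [self.subb_ind], self.cost,
    updates=momentum_updates,
    givens={self.x: self.shared_x_slice,   # sub-batch selected inside the graph
            self.y: self.shared_y_slice})
# train_fn(0), train_fn(1), ... sweep the sub-batches of one file batch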
Example #13
    def __init__(self, config):
        ModelBase.__init__(self)

        self.config = config
        self.verbose = self.config['verbose']
        self.build_model()
Example #14
    def remove(self, user_id, record_id):
        record = self.collection.find_one(ModelBase.get_oid(record_id))
        if str(record["user_id"]) == user_id:
            self.collection.remove(record)
            return True
        return False
Example #15
    def __init__(self, config):
        ModelBase.__init__(self)

        self.config = config
        self.verbose = config['verbose']

        self.name = 'vggnet'

        # input shape in c01b
        self.channels = 3  # 'c' mean(R,G,B) = (103.939, 116.779, 123.68)
        self.input_width = self.config[
            'input_width']  # '0' single scale training 224
        self.input_height = self.config[
            'input_height']  # '1' single scale training 224
        self.batch_size = self.config['batch_size']  # 'b'

        # output dimension
        self.n_softmax_out = self.config['n_softmax_out']

        # training related
        self.base_lr = np.float32(self.config['learning_rate'])
        self.shared_lr = theano.shared(self.base_lr)
        self.step_idx = 0
        self.mu = config['momentum']  # def: 0.9 # momentum
        self.eta = config['weight_decay']  #0.0002 # weight decay

        self.x = T.ftensor4('x')
        self.y = T.lvector('y')
        self.lr = T.scalar('lr')

        self.shared_x = theano.shared(np.zeros(
            (3, self.input_width, self.input_height,
             self.config['file_batch_size']),
            dtype=theano.config.floatX),
                                      borrow=True)

        self.shared_y = theano.shared(np.zeros(
            (self.config['file_batch_size'], ), dtype=int),
                                      borrow=True)

        # build model
        net = self.build_model(input_shape=(self.batch_size, 3,
                                            self.input_width,
                                            self.input_height))  # bc01
        #self.output_layer = net['fc8']
        self.output_layer = net['prob']

        from lasagne.layers import get_all_params
        self.params = lasagne.layers.get_all_params(self.output_layer,
                                                    trainable=True)
        self.extract_weight_types()
        self.pack_layers()

        # count params
        if self.verbose: self.count_params()

        from lasagne.layers import get_output
        self.output = lasagne.layers.get_output(self.output_layer,
                                                self.x,
                                                deterministic=False)
        self.cost = lasagne.objectives.categorical_crossentropy(
            self.output, self.y).mean()
        self.error = self.errors(self.output, self.y)

        self.grads = T.grad(self.cost, self.params)

        subb_ind = T.iscalar('subb')  # sub batch index
        #print self.shared_x[:,:,:,subb_ind*self.batch_size:(subb_ind+1)*self.batch_size].shape.eval()
        self.subb_ind = subb_ind
        self.shared_x_slice = self.shared_x[:, :, :, subb_ind *
                                            self.batch_size:(subb_ind + 1) *
                                            self.batch_size].dimshuffle(
                                                3, 0, 1, 2)  # c01b to bc01
        self.shared_y_slice = self.shared_y[subb_ind *
                                            self.batch_size:(subb_ind + 1) *
                                            self.batch_size]
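The trailing dimshuffle(3, 0, 1, 2) converts the c01b storage layout (channel, row, column, batch) into the bc01 layout that Lasagne expects. The same permutation in plain NumPy, for reference:

import numpy as np

arr_c01b = np.zeros((3, 224, 224, 32))           # (c, 0, 1, b)
arr_bc01 = np.transpose(arr_c01b, (3, 0, 1, 2))  # (b, c, 0, 1)
assert arr_bc01.shape == (32, 3, 224, 224)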
Example #16
	def get_effective(self):
		cursor = self.collection.find({"invalid_date": {"$gte": datetime.utcnow()}}).sort("create_date", -1)
		notice_list = ModelBase.cursor2list(cursor)
		return [self.to_user(notice) for notice in notice_list]
Example #17
	def get_all(self):
		cursor = self.collection.find().sort("create_date", -1)
		notice_list = ModelBase.cursor2list(cursor)
		return [self.to_user(notice) for notice in notice_list]
Example #18
	def get_one(self, notice_id):
		notice = self.collection.find_one(ModelBase.get_oid(notice_id))
		notice = self.to_user(notice)
		return ModelBase.transform_id(notice)
Example #19
 def __init__(self, config):
     ModelBase.__init__(self)
     
     self.config = config
     self.verbose = config['verbose']
     
     self.name = 'vggnet'
     
     # input shape in c01b 
     self.channels = 3 # 'c' mean(R,G,B) = (103.939, 116.779, 123.68)
     self.input_width = self.config['input_width'] # '0' single scale training 224
     self.input_height = self.config['input_height'] # '1' single scale training 224
     self.batch_size = self.config['batch_size'] # 'b'
     
     # output dimension
     self.n_softmax_out = self.config['n_softmax_out']
     
     
     
     # training related
     self.base_lr = np.float32(self.config['learning_rate'])
     self.shared_lr = theano.shared(self.base_lr)
     self.step_idx = 0
     self.mu = config['momentum'] # def: 0.9 # momentum
     self.eta = config['weight_decay'] #0.0002 # weight decay
     
     self.x = T.ftensor4('x')
     self.y = T.lvector('y')
     self.lr = T.scalar('lr')      
     
     self.shared_x = theano.shared(np.zeros((
                                             3,
                                             self.input_width, 
                                             self.input_height,
                                             self.config['file_batch_size']
                                             ), 
                                             dtype=theano.config.floatX),  
                                             borrow=True)
                                           
     self.shared_y = theano.shared(np.zeros((self.config['file_batch_size'],), 
                                       dtype=int),   borrow=True)
                                       
     # build model                                 
     net = self.build_model(input_shape=(self.batch_size, 3, self.input_width, self.input_height)) # bc01
     #self.output_layer = net['fc8'] 
     self.output_layer = net['prob']
     
     from lasagne.layers import get_all_params
     self.params = lasagne.layers.get_all_params(self.output_layer, trainable=True)
     self.extract_weight_types()
     self.pack_layers()
     
     # count params
     if self.verbose: self.count_params()
     
     from lasagne.layers import get_output
     self.output = lasagne.layers.get_output(self.output_layer, self.x, deterministic=False)
     self.cost = lasagne.objectives.categorical_crossentropy(self.output, self.y).mean()
     self.error = self.errors(self.output, self.y)
     
     
     self.grads = T.grad(self.cost,self.params)
                                       
     subb_ind = T.iscalar('subb')  # sub batch index
     #print self.shared_x[:,:,:,subb_ind*self.batch_size:(subb_ind+1)*self.batch_size].shape.eval()
     self.subb_ind = subb_ind
     self.shared_x_slice = self.shared_x[:,:,:,subb_ind*self.batch_size:(subb_ind+1)*self.batch_size].dimshuffle(3, 0, 1, 2) # c01b to bc01
     self.shared_y_slice = self.shared_y[subb_ind*self.batch_size:(subb_ind+1)*self.batch_size]
Example #20
    def get_all(self, user_id):
        favorites = self.collection.find({"user_id": ModelBase.get_oid(user_id)}, {"song_id": 1})
        return ModelBase.cursor2list(favorites)
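The second argument to find() is a projection, so each returned document carries song_id (plus the implicit _id) and nothing else. A hypothetical caller, assuming cursor2list materializes the cursor into a list of plain dicts:

favs = favorites_model.get_all("5f2b9c0e1c4ae33a1e000001")  # assumed instance and user id
song_ids = [f["song_id"] for f in favs]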
Example #21
    def setUp(self):
        self.mb = ModelBase(name="my mb", model_dir='test_models')
Example #22
    def add(self, user_id, song_id):
        fav = self.collection.insert({
            "user_id": ModelBase.get_oid(user_id),
            "song_id": song_id
        })
        return str(fav)
Example #23
 def __init__(self, config):
     ModelBase.__init__(self)
     
     self.config = config
     self.verbose = config['verbose']
     
     self.name = 'vggnet'
     
     # input shape in c01b 
     self.channels = 3 # 'c' mean(R,G,B) = (103.939, 116.779, 123.68)
     self.input_width = self.config['input_width'] # '0' single scale training 224
     self.input_height = self.config['input_height'] # '1' single scale training 224
     self.batch_size = self.config['batch_size'] # 'b'
     
     # output dimension
     self.n_softmax_out = self.config['n_softmax_out']
     
     
     
     # training related
     self.base_lr = np.float32(self.config['learning_rate'])
     self.shared_lr = theano.shared(self.base_lr)
     self.step_idx = 0
     self.mu = config['momentum'] # def: 0.9 # momentum
     self.eta = config['weight_decay'] #0.0002 # weight decay
     
     self.x = T.ftensor4('x')
     self.y = T.lvector('y')      
     
     self.shared_x = theano.shared(np.zeros((
                                             3,
                                             self.input_width, 
                                             self.input_height,
                                             self.config['file_batch_size']
                                             ), 
                                             dtype=theano.config.floatX),  
                                             borrow=True)
                                           
     self.shared_y = theano.shared(np.zeros((self.config['file_batch_size'],), 
                                       dtype=int),   borrow=True)
                                       
     # build model                                 
     net = self.build_model(input_shape=(self.batch_size, 3, self.input_width, self.input_height)) # bc01
     #self.output_layer = net['fc8'] 
     self.output_layer = net['prob']
     
     from lasagne.layers import get_all_params
     self.params = lasagne.layers.get_all_params(self.output_layer, trainable=True)
     self.extract_weight_types()
     self.pack_layers()
     
     # count params
     if self.verbose: self.count_params()
     
                                       
     # shared variable for storing momentum before exchanging momentum(delta w)
     self.vels = [theano.shared(param_i.get_value() * 0.)
         for param_i in self.params]
     
     # shared variable for accepting momentum during exchanging momentum(delta w)
     self.vels2 = [theano.shared(param_i.get_value() * 0.)
         for param_i in self.params]
         
                                       
     self.train = None
     self.val = None
     self.inference = None
     self.get_vel = None
     self.descent_vel = None
Example #24
    def __init__(self, config):
        ModelBase.__init__(self)

        self.config = config
        self.verbose = self.config['verbose']
        
        self.name = 'customized'
        
        # input shape in c01b 
        self.channels = 3 # 'c' mean(R,G,B) = (103.939, 116.779, 123.68)
        self.input_width = self.config['input_width'] # '0' single scale training 224
        self.input_height = self.config['input_height'] # '1' single scale training 224
        self.batch_size = self.config['batch_size'] # 'b'
        
        # output dimension
        self.n_softmax_out = self.config['n_softmax_out']
        
        # training related
        self.base_lr = np.float32(self.config['learning_rate'])
        self.shared_lr = theano.shared(self.base_lr)
        self.step_idx = 0
        self.mu = self.config['momentum'] # def: 0.9 # momentum
        self.eta = self.config['weight_decay'] #0.0002 # weight decay
        
        self.shared_x = theano.shared(np.zeros((
                                                3,
                                                self.input_width, 
                                                self.input_height,
                                                self.config['file_batch_size']
                                                ), 
                                                dtype=theano.config.floatX),  
                                                borrow=True)
                                              
        self.shared_y = theano.shared(np.zeros((self.config['file_batch_size'],), 
                                          dtype=int),   borrow=True)                                  
        
        # build model
        self.build_model()
        
        self.output = self.output_layer.output
        
        self.layers = get_layers(lastlayer = self.output_layer)
        
        self.layers = [layer for layer in self.layers \
            if layer.name not in ['LRN\t','Pool\t','Flatten\t','Dropout'+ str(0.5)]]
        
        self.params, self.weight_types = get_params(self.layers) 
        # if multi-stream layers exist, redefine and abstract into one layer class in layers2.py
        
        # count params
        self.count_params()
        
        # shared variable for storing momentum before exchanging momentum(delta w)
        self.vels = [theano.shared(param_i.get_value() * 0.)
            for param_i in self.params]
        
        # shared variable for accepting momentum during exchanging momentum(delta w)
        self.vels2 = [theano.shared(param_i.get_value() * 0.)
            for param_i in self.params]
                                          
        self.train = None
        self.val = None
        self.inference = None
        self.get_vel = None
        self.descent_vel = None
Example #25
    def __init__(self, config):
        ModelBase.__init__(self)

        self.config = config
        self.verbose = self.config['verbose']
        self.build_model()
Example #26
    def __init__(self, config):
        ModelBase.__init__(self)

        self.verbose = config['verbose']
        self.config = config
        if self.verbose: print('GoogLeNet 7/5')

        batch_size = config['batch_size']
        input_width = config['input_width']
        input_height = config['input_height']
        n_softmax_out = config['n_softmax_out']

        self.name = 'googlenet'
        self.batch_size = batch_size
        self.input_width = input_width
        self.input_height = input_height
        self.n_softmax_out = n_softmax_out
        self.lrn_func = CrossChannelNormalization()

        x = T.ftensor4('x')
        y = T.lvector('y')
        lr = T.scalar('lr')

        self.x = x  # c01b
        self.y = y
        self.lr = lr

        layers = []
        params = []
        weight_types = []

        conv_7x7 = ConvPool_LRN(
            input=x,
            image_shape=(3, 224, 224,
                         batch_size),  #c01b (3, 224, 224, batch_size)
            filter_shape=(3, 7, 7, 64),
            convstride=2,
            padsize=3,
            poolsize=3,
            poolstride=2,
            poolpad=1,
            W=Weight((3, 7, 7, 64), mean=0.0, std=0.1),
            b=Weight((64, ), mean=0.2, std=0),
            lrn=True,
            lib_conv='cudnn',
        )
        layers.append(conv_7x7)
        params += conv_7x7.params
        weight_types += conv_7x7.weight_type
        # output shape = (112x112x64)
        # output shape = (56x56x64)

        conv_r3x3 = Conv(
            input=conv_7x7.output,
            image_shape=(64, 56, 56, batch_size),
            filter_shape=(64, 1, 1, 64),
            convstride=1,
            padsize=0,
            W=Weight((64, 1, 1, 64), mean=0.0, std=0.1),
            b=Weight((64, ), mean=0.2, std=0),
            lib_conv='cudnn',
        )

        layers.append(conv_r3x3)
        params += conv_r3x3.params
        weight_types += conv_r3x3.weight_type
        # output shape = (56x56x64)

        conv_3x3 = ConvPool_LRN(
            input=conv_r3x3.output,
            image_shape=(64, 56, 56, batch_size),
            filter_shape=(64, 3, 3, 192),
            convstride=1,
            padsize=1,
            poolsize=3,
            poolstride=2,
            poolpad=1,
            W=Weight((64, 3, 3, 192), mean=0.0, std=0.03),
            b=Weight((192, ), mean=0.2, std=0),
            lrn=True,
            lib_conv='cudnn',
        )

        layers.append(conv_3x3)
        params += conv_3x3.params
        weight_types += conv_3x3.weight_type
        # output shape = (56x56x192)
        # output shape = (28x28x192)

        incep3a = Incept(conv_3x3.output,
                         input_shape=(192, 28, 28, batch_size))

        layers += incep3a.layers
        params += incep3a.params
        weight_types += incep3a.weight_types
        print('incep3a output shape: (28x28x256)')
        # output shape = (28x28x256)

        incep3b = Incept(incep3a.output,
                         input_shape=(256, 28, 28, batch_size),
                         n1x1=128,
                         nr3x3=128,
                         n3x3=192,
                         nr5x5=32,
                         n5x5=96,
                         npj=64)

        layers += incep3b.layers
        params += incep3b.params
        weight_types += incep3b.weight_types
        print('incep3b output shape: (28x28x480)')
        # output shape = (28x28x480)

        #        lrn3 = self.lrn_func(incep3b.output)
        #        print 'LRN(added)'

        pool3 = Pool(input=incep3b.output,
                     poolsize=3,
                     poolstride=2,
                     poolpad=1,
                     mode='max')
        # output shape = (14x14x480)

        incep4a = Incept(pool3.output,
                         input_shape=(480, 14, 14, batch_size),
                         n1x1=192,
                         nr3x3=96,
                         n3x3=208,
                         nr5x5=16,
                         n5x5=48,
                         npj=64)

        layers += incep4a.layers
        params += incep4a.params
        weight_types += incep4a.weight_types
        print('incep4a output shape: (14x14x512)')
        # output shape = (14x14x512)

        incep4b = Incept(incep4a.output,
                         input_shape=(512, 14, 14, batch_size),
                         n1x1=160,
                         nr3x3=112,
                         n3x3=224,
                         nr5x5=24,
                         n5x5=64,
                         npj=64)

        layers += incep4b.layers
        params += incep4b.params
        weight_types += incep4b.weight_types
        print('incep4b output shape: (14x14x512)')
        # output shape = (14x14x512)

        incep4c = Incept(incep4b.output,
                         input_shape=(512, 14, 14, batch_size),
                         n1x1=128,
                         nr3x3=128,
                         n3x3=256,
                         nr5x5=24,
                         n5x5=64,
                         npj=64)

        layers += incep4c.layers
        params += incep4c.params
        weight_types += incep4c.weight_types
        print('incep4c output shape: (14x14x512)')
        # output shape = (14x14x512)

        incep4d = Incept(incep4c.output,
                         input_shape=(512, 14, 14, batch_size),
                         n1x1=112,
                         nr3x3=144,
                         n3x3=288,
                         nr5x5=32,
                         n5x5=64,
                         npj=64)

        layers += incep4d.layers
        params += incep4d.params
        weight_types += incep4d.weight_types
        print('incep4d output shape: (14x14x528)')
        # output shape = (14x14x528)

        incep4e = Incept(incep4d.output,
                         input_shape=(528, 14, 14, batch_size),
                         n1x1=256,
                         nr3x3=160,
                         n3x3=320,
                         nr5x5=32,
                         n5x5=128,
                         npj=128)

        layers += incep4e.layers
        params += incep4e.params
        weight_types += incep4e.weight_types
        print('incep4e output shape: (14x14x832)')
        # output shape = (14x14x832)

        lrn4 = self.lrn_func(
            incep4e.output)  # turn on only this for 16data, 53s/5120images
        print('LRN(added)')

        pool4 = Pool(
            input=lrn4,  #incep4e.output,
            poolsize=3,
            poolstride=2,
            poolpad=1,
            mode='max')
        # output shape = (7x7x832)

        incep5a = Incept(pool4.output,
                         input_shape=(832, 7, 7, batch_size),
                         n1x1=256,
                         nr3x3=160,
                         n3x3=320,
                         nr5x5=32,
                         n5x5=128,
                         npj=128)

        layers += incep5a.layers
        params += incep5a.params
        weight_types += incep5a.weight_types
        print('incep5a output shape: (7x7x832)')
        # output shape = (7x7x832)

        incep5b = Incept(incep5a.output,
                         input_shape=(832, 7, 7, batch_size),
                         n1x1=384,
                         nr3x3=192,
                         n3x3=384,
                         nr5x5=48,
                         n5x5=128,
                         npj=128)

        layers += incep5b.layers
        params += incep5b.params
        weight_types += incep5b.weight_types
        print('incep5b output shape: (7x7x1024)')
        # output shape = (7x7x1024)

        #        lrn5 = self.lrn_func(incep5b.output) # turn on only this for 16data, 51s/5120images
        #        print 'LRN(added)'

        poolx = Pool(input=incep5b.output,
                     poolsize=7,
                     poolstride=1,
                     poolpad=0,
                     mode='average')
        # output shape = (1x1x1024)

        l_flatten = T.flatten(poolx.output.dimshuffle(3, 0, 1, 2), 2)
        # output shape = (1024)

        dropout = Dropout(input=l_flatten,
                          n_in=1024,
                          n_out=1024,
                          prob_drop=0.4)
        # output shape = (1024)

        softmax_layer = Softmax(input=dropout.output,
                                n_in=1024,
                                n_out=n_softmax_out)
        # output shape = (n_softmax_out)

        layers.append(softmax_layer)
        params += softmax_layer.params
        weight_types += softmax_layer.weight_type

        # auxiliary classifiers
        print('auxiliary classifier 1:')
        aux1 = aux_tower(input=incep4a.output,
                         input_shape=(512, 14, 14, batch_size),
                         config=config)

        layers += aux1.layers
        params += aux1.params
        weight_types += aux1.weight_types

        print('auxiliary classifier 2:')
        aux2 = aux_tower(input=incep4d.output,
                         input_shape=(528, 14, 14, batch_size),
                         config=config)

        layers += aux2.layers
        params += aux2.params
        weight_types += aux2.weight_types

        self.layers = layers
        self.params = params
        self.weight_types = weight_types
        self.output = softmax_layer.p_y_given_x
        self.cost = softmax_layer.negative_log_likelihood(y)+\
                0.3*aux1.negative_log_likelihood(y)+0.3*aux2.negative_log_likelihood(y)
        self.error = softmax_layer.errors(y)
        self.error_top_5 = softmax_layer.errors_top_x(y)

        # training related
        self.base_lr = np.float32(config['learning_rate'])
        self.shared_lr = theano.shared(self.base_lr)
        self.mu = config['momentum']  # def: 0.9 # momentum
        self.eta = config['weight_decay']  #0.0002 # weight decay

        self.shared_x = theano.shared(np.zeros(
            (3, config['input_width'], config['input_height'],
             config['file_batch_size']),
            dtype=theano.config.floatX),
                                      borrow=True)

        self.shared_y = theano.shared(np.zeros((config['file_batch_size'], ),
                                               dtype=int),
                                      borrow=True)

        self.grads = T.grad(self.cost, self.params)

        subb_ind = T.iscalar('subb')  # sub batch index
        #print self.shared_x[:,:,:,subb_ind*self.batch_size:(subb_ind+1)*self.batch_size].shape.eval()
        self.subb_ind = subb_ind
        self.shared_x_slice = self.shared_x[:, :, :, subb_ind *
                                            self.batch_size:(subb_ind + 1) *
                                            self.batch_size]
        self.shared_y_slice = self.shared_y[subb_ind *
                                            self.batch_size:(subb_ind + 1) *
                                            self.batch_size]
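The combined cost above follows the GoogLeNet recipe: during training the two auxiliary towers contribute their negative log-likelihood to the main softmax loss with a weight of 0.3 each, and at inference they are discarded (self.output is the main softmax alone). Schematically:

# training objective only; prediction uses softmax_layer.p_y_given_x by itself
total_cost = main_nll + 0.3 * aux1_nll + 0.3 * aux2_nll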
Example #27
    def __init__(self, config):
        ModelBase.__init__(self)
        
        self.verbose = config['verbose']   
        self.config = config
        if self.verbose: print('GoogLeNet 7/5')
        
        batch_size = config['batch_size']
        input_width = config['input_width']
        input_height = config['input_height']
        n_softmax_out=config['n_softmax_out']
        
        
        self.name = 'googlenet'
        self.batch_size = batch_size
        self.input_width = input_width
        self.input_height = input_height
        self.n_softmax_out = n_softmax_out
        self.lrn_func = CrossChannelNormalization()


        x = T.ftensor4('x')
        y = T.lvector('y')
        
        self.x = x # c01b
        self.y = y
        
        layers = []
        params = []
        weight_types = []        
        

        conv_7x7 = ConvPool_LRN(input=x,
                                image_shape=(3, 224, 224, batch_size), #c01b (3, 224, 224, batch_size)
                                filter_shape=(3, 7, 7, 64),
                                convstride=2, padsize=3,
                                poolsize=3, poolstride=2, poolpad=1,
                                W = Weight((3, 7, 7, 64), mean = 0.0, std=0.1 ),
                                b = Weight((64,), mean = 0.2 , std=0 ), 
                                lrn=True,
                                lib_conv='cudnn',
                                )
        layers.append(conv_7x7)
        params += conv_7x7.params
        weight_types += conv_7x7.weight_type                    
        # output shape = (112x112x64)
        # output shape = (56x56x64)
                                  
        conv_r3x3 = Conv(input=conv_7x7.output,
                        image_shape=(64,56,56,batch_size),
                        filter_shape=(64, 1, 1, 64),
                        convstride=1, padsize=0,
                        W = Weight((64, 1, 1, 64), mean = 0.0, std=0.1 ),
                        b = Weight((64,), mean = 0.2 , std=0 ),
                        lib_conv='cudnn',
                        )                                           

        layers.append(conv_r3x3)
        params += conv_r3x3.params
        weight_types += conv_r3x3.weight_type   
        # output shape = (56x56x64)                       

                                
                                        
        conv_3x3 = ConvPool_LRN(input=conv_r3x3.output,
                                image_shape=(64,56,56,batch_size),
                                filter_shape=(64, 3, 3, 192),
                                convstride=1, padsize=1,
                                poolsize=3, poolstride=2, poolpad=1,
                                W = Weight((64, 3, 3, 192), mean = 0.0, std=0.03 ),
                                b = Weight((192,), mean = 0.2 , std=0 ), 
                                lrn=True,
                                lib_conv='cudnn',
                                )                                           

        layers.append(conv_3x3)
        params += conv_3x3.params
        weight_types += conv_3x3.weight_type  
        # output shape = (56x56x192) 
        # output shape = (28x28x192)


        incep3a = Incept(conv_3x3.output,input_shape = (192,28,28,batch_size))
        
        layers += incep3a.layers
        params += incep3a.params
        weight_types += incep3a.weight_types        
        print('incep3a output shape: (28x28x256)')
        # output shape = (28x28x256)
        
        incep3b = Incept(incep3a.output,input_shape = (256,28,28,batch_size),
                          n1x1=128, nr3x3=128, n3x3=192, 
                          nr5x5=32, n5x5=96, npj=64)
        
        layers += incep3b.layers
        params += incep3b.params
        weight_types += incep3b.weight_types        
        print('incep3b output shape: (28x28x480)')
        # output shape = (28x28x480)        

#        lrn3 = self.lrn_func(incep3b.output)
#        print 'LRN(added)'
        
        pool3 = Pool(input=incep3b.output,
                      poolsize=3, poolstride=2, poolpad=1, 
                      mode = 'max' )        
        # output shape = (14x14x480)
        
        incep4a = Incept(pool3.output, input_shape = (480,14,14,batch_size), 
                          n1x1=192, nr3x3=96, n3x3=208, 
                          nr5x5=16, n5x5=48, npj=64)
        
        layers += incep4a.layers
        params += incep4a.params
        weight_types += incep4a.weight_types        
        print('incep4a output shape: (14x14x512)')
        # output shape = (14x14x512)
        
        incep4b = Incept(incep4a.output, input_shape = (512,14,14,batch_size), 
                          n1x1=160, nr3x3=112, n3x3=224, 
                          nr5x5=24, n5x5=64, npj=64)
        
        layers += incep4b.layers
        params += incep4b.params
        weight_types += incep4b.weight_types
        print('incep4b output shape: (14x14x512)')
        # output shape = (14x14x512)          
        

        incep4c = Incept(incep4b.output, input_shape = (512,14,14,batch_size), 
                          n1x1=128, nr3x3=128, n3x3=256, 
                          nr5x5=24, n5x5=64, npj=64)
        
        layers += incep4c.layers
        params += incep4c.params
        weight_types += incep4c.weight_types
        print('incep4c output shape: (14x14x512)')
        # output shape = (14x14x512) 

        incep4d = Incept(incep4c.output, input_shape = (512,14,14,batch_size), 
                          n1x1=112, nr3x3=144, n3x3=288, 
                          nr5x5=32, n5x5=64, npj=64)
        
        layers += incep4d.layers
        params += incep4d.params
        weight_types += incep4d.weight_types
        print('incep4d output shape: (14x14x528)')
        # output shape = (14x14x528) 
         
        
        incep4e = Incept(incep4d.output, input_shape = (528,14,14,batch_size), 
                          n1x1=256, nr3x3=160, n3x3=320, 
                          nr5x5=32, n5x5=128, npj=128)
        
        layers += incep4e.layers
        params += incep4e.params
        weight_types += incep4e.weight_types
        print('incep4e output shape: (14x14x832)')
        # output shape = (14x14x832)                
        
        lrn4 = self.lrn_func(incep4e.output)  # turn on only this for 16data, 53s/5120images
        print('LRN(added)')
        
        pool4 = Pool(input=lrn4, #incep4e.output,
                      poolsize=3, poolstride=2, poolpad=1, 
                      mode = 'max' )        
        # output shape = (7x7x832)        
        
        incep5a = Incept(pool4.output, input_shape = (832,7,7,batch_size), 
                          n1x1=256, nr3x3=160, n3x3=320, 
                          nr5x5=32, n5x5=128, npj=128)
        
        layers += incep5a.layers
        params += incep5a.params
        weight_types += incep5a.weight_types
        print('incep5a output shape: (7x7x832)')
        # output shape = (7x7x832)   
        
        
        incep5b = Incept(incep5a.output, input_shape = (832,7,7,batch_size), 
                          n1x1=384, nr3x3=192, n3x3=384, 
                          nr5x5=48, n5x5=128, npj=128)
        
        layers += incep5b.layers
        params += incep5b.params
        weight_types += incep5b.weight_types
        print('incep5b output shape: (7x7x1024)')
        # output shape = (7x7x1024)
        
#        lrn5 = self.lrn_func(incep5b.output) # turn on only this for 16data, 51s/5120images
#        print 'LRN(added)'
        
        poolx = Pool(input=incep5b.output,
                      poolsize=7, poolstride=1, poolpad=0, 
                      mode = 'average' )
        # output shape = (1x1x1024)

           
        l_flatten = T.flatten(poolx.output.dimshuffle(3, 0, 1, 2), 2)
        # output shape = (1024)                              
    
        dropout= Dropout(input=l_flatten,n_in=1024, n_out=1024, prob_drop=0.4)
        # output shape = (1024)
               
        
        softmax_layer = Softmax(input=dropout.output ,n_in=1024, n_out=n_softmax_out)
        # output shape = (n_softmax_out)       
        
        layers.append(softmax_layer)
        params += softmax_layer.params
        weight_types += softmax_layer.weight_type        
        
        # auxiliary classifiers
        print('auxiliary classifier 1:')
        aux1 = aux_tower(input=incep4a.output,input_shape=(512,14,14,batch_size),config=config)
        
        layers += aux1.layers
        params += aux1.params
        weight_types += aux1.weight_types
        
        print('auxiliary classifier 2:')
        aux2 = aux_tower(input=incep4d.output,input_shape=(528,14,14,batch_size),config=config)
        
        layers += aux2.layers
        params += aux2.params
        weight_types += aux2.weight_types 

        self.layers = layers
        self.params = params
        self.weight_types = weight_types        
        self.output = softmax_layer.p_y_given_x
        self.cost = softmax_layer.negative_log_likelihood(y)+\
                0.3*aux1.negative_log_likelihood(y)+0.3*aux2.negative_log_likelihood(y)        
        self.error = softmax_layer.errors(y)
        self.error_top_5 = softmax_layer.errors_top_x(y)
        
        # training related
        self.base_lr = np.float32(config['learning_rate'])
        self.shared_lr = theano.shared(self.base_lr)
        self.mu = config['momentum'] # def: 0.9 # momentum
        self.eta = config['weight_decay'] #0.0002 # weight decay
        
        self.shared_x = theano.shared(np.zeros((3, config['input_width'], 
                                                  config['input_height'], 
                                                  config['file_batch_size']), 
                                                  dtype=theano.config.floatX),  
                                                  borrow=True)
                                              
        self.shared_y = theano.shared(np.zeros((config['file_batch_size'],), 
                                          dtype=int),   borrow=True)
        
        # shared variable for storing momentum before exchanging momentum(delta w)
        self.vels = [theano.shared(param_i.get_value() * 0.)
            for param_i in self.params]
        
        # shared variable for accepting momentum during exchanging momentum(delta w)
        self.vels2 = [theano.shared(param_i.get_value() * 0.)
            for param_i in self.params]
            
        self.train = None
        self.get_vel = None
        self.descent_vel = None
        self.val = None
        self.inference = None
Example #28
    def __init__(self):
        ModelBase.__init__(self)
        self.model = LinearSVC()
Example #29
	def get_all(self):
		cursor = self.collection.find()
		return ModelBase.cursor2list(cursor)