Example No. 1
    def __init__(self, model_config):
        super(CNNPredictor, self).__init__(model_config, 'CNN')
        conv_config, conv_layer_config, mlp_config = load_conv_spec(
            self.model_config['nnet_spec'], self.batch_size,
            self.model_config['input_shape'])
        activationFn = parse_activation(mlp_config['activation'])
        if mlp_config['do_dropout'] or conv_config['do_dropout']:
            self.model = DropoutCNN(
                self.numpy_rng,
                self.theano_rng,
                conv_layer_configs=conv_layer_config,
                batch_size=self.batch_size,
                n_outs=self.model_config['n_outs'],
                hidden_layer_configs=mlp_config,
                hidden_activation=activationFn,
                use_fast=conv_config['use_fast'],
                l1_reg=mlp_config['l1_reg'],
                l2_reg=mlp_config['l2_reg'],
                max_col_norm=mlp_config['max_col_norm'],
                input_dropout_factor=conv_config['input_dropout_factor'])
        else:
            self.model = CNN(self.numpy_rng,
                             self.theano_rng,
                             conv_layer_configs=conv_layer_config,
                             batch_size=self.batch_size,
                             n_outs=self.model_config['n_outs'],
                             hidden_layer_configs=mlp_config,
                             hidden_activation=activationFn,
                             use_fast=conv_config['use_fast'],
                             l1_reg=mlp_config['l1_reg'],
                             l2_reg=mlp_config['l2_reg'],
                             max_col_norm=mlp_config['max_col_norm'])

        self.__load_model__(self.model_config['input_file'],
                            mlp_config['pretrained_layers'])
Example No. 2
def runDNN(arg):


	if type(arg) is dict:
		model_config = arg
	else:
		model_config = load_model(arg, 'DNN')

	dnn_config = load_dnn_spec(model_config['nnet_spec'])
	data_spec = load_data_spec(model_config['data_spec'], model_config['batch_size'])


	# initialize the random number generators
	numpy_rng = numpy.random.RandomState(model_config['random_seed'])
	theano_rng = RandomStreams(numpy_rng.randint(2 ** 30))
	
	activationFn = parse_activation(dnn_config['activation'])

	# create the working directory
	createDir(model_config['wdir'])
	
	batch_size = model_config['batch_size']
	n_ins = model_config['n_ins']
	n_outs = model_config['n_outs']
	
	max_col_norm = dnn_config['max_col_norm']
	l1_reg = dnn_config['l1_reg']
	l2_reg = dnn_config['l2_reg']	
	adv_activation = dnn_config['adv_activation']
	hidden_layers_sizes = dnn_config['hidden_layers']
	do_dropout = dnn_config['do_dropout']
	logger.info('Building the model')

	if do_dropout:
		dropout_factor = dnn_config['dropout_factor']
		input_dropout_factor = dnn_config['input_dropout_factor']

		dnn = DNN_Dropout(numpy_rng=numpy_rng, theano_rng = theano_rng, n_ins=n_ins,
			  hidden_layers_sizes=hidden_layers_sizes, n_outs=n_outs,
			  activation = activationFn, dropout_factor = dropout_factor,
			  input_dropout_factor = input_dropout_factor, adv_activation = adv_activation,
			  max_col_norm = max_col_norm, l1_reg = l1_reg, l2_reg = l2_reg)
	else:
		
		dnn = DNN(numpy_rng=numpy_rng, theano_rng = theano_rng, n_ins=n_ins,
			  hidden_layers_sizes=hidden_layers_sizes, n_outs=n_outs,
			  activation = activationFn, adv_activation = adv_activation,
			  max_col_norm = max_col_norm, l1_reg = l1_reg, l2_reg = l2_reg)


	logger.info("Loading Pretrained network weights")
	try:
		# pretraining
		ptr_file = model_config['input_file']
		pretrained_layers = dnn_config['pretrained_layers']
		dnn.load(filename=ptr_file,max_layer_num = pretrained_layers,  withfinal=True)
	except KeyError as e:
		logger.critical("KeyMissing:" + str(e))
		logger.error("Pretrained network missing in config file")
		sys.exit(2)
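A minimal invocation sketch for runDNN above. Every key, path, and value below is an assumption chosen to match what this example reads (load_dnn_spec, load_data_spec, createDir, dnn.load); the actual schema is defined by this project's config loaders, not by this sketch.

# Hypothetical usage sketch: keys and paths are illustrative assumptions only.
model_config = {
    'nnet_spec': 'config/dnn.cfg',        # hypothetical network spec file
    'data_spec': 'config/data.cfg',       # hypothetical data spec file
    'wdir': 'exp/dnn',                    # working directory to be created
    'batch_size': 256,
    'n_ins': 440,
    'n_outs': 1024,
    'random_seed': 89677,
    'input_file': 'exp/pretrained_dnn',   # pretrained weights read by dnn.load
}
runDNN(model_config)                      # or runDNN('path/to/model.cfg')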
Example No. 3
def runCNN3D(arg):

    if type(arg) is dict:
        model_config = arg
    else:
        model_config = load_model(arg, 'CNN')

    conv_config, conv_layer_config, mlp_config = load_conv_spec(
        model_config['nnet_spec'], model_config['batch_size'],
        model_config['input_shape'])
    #__debugPrintData__(conv_layer_config,'convolution config')

    data_spec = load_data_spec(model_config['data_spec'],
                               model_config['batch_size'])

    numpy_rng = numpy.random.RandomState(model_config['random_seed'])
    theano_rng = RandomStreams(numpy_rng.randint(2**30))

    logger.info('> ... building the model')
    hidden_activation = parse_activation(mlp_config['activation'])

    # create the working directory
    createDir(model_config['wdir'])

    batch_size = model_config['batch_size']

    cnn = CNN3D(numpy_rng,
                theano_rng,
                conv_layer_configs=conv_layer_config,
                batch_size=batch_size,
                n_outs=model_config['n_outs'],
                hidden_layer_configs=mlp_config,
                hidden_activation=hidden_activation,
                l1_reg=mlp_config['l1_reg'],
                l2_reg=mlp_config['l2_reg'],
                max_col_norm=mlp_config['max_col_norm'])

    ########################
    #  LOADING THE MODEL   #
    ########################
    try:
        # pretraining
        ptr_file = model_config['input_file']
        pretrained_layers = mlp_config['pretrained_layers']
        logger.info("Loading the pretrained network..")
        cnn.load(filename=ptr_file,
                 max_layer_num=pretrained_layers,
                 withfinal=True)
    except KeyError as e:
        logger.warning(
            "Pretrained network missing in working directory, skipping model loading"
        )
Example No. 4
def runCNN(arg):
	
	if type(arg) is dict:
		model_config = arg
	else:
		model_config = load_model(arg, 'CNN')
	
	conv_config,conv_layer_config,mlp_config = load_conv_spec(
			model_config['nnet_spec'],
			model_config['batch_size'],
			model_config['input_shape'])

	data_spec = load_data_spec(model_config['data_spec'], model_config['batch_size'])

	
	numpy_rng = numpy.random.RandomState(89677)
	theano_rng = RandomStreams(numpy_rng.randint(2 ** 30))
	
	logger.info('> ... building the model')
	activationFn = parse_activation(mlp_config['activation'])

	# create the working directory
	createDir(model_config['wdir'])

	batch_size = model_config['batch_size']
	if mlp_config['do_dropout'] or conv_config['do_dropout']:
		logger.info('>Initializing dropout cnn model')
		cnn = DropoutCNN(numpy_rng,theano_rng,conv_layer_configs = conv_layer_config, batch_size = batch_size,
				n_outs=model_config['n_outs'],hidden_layer_configs=mlp_config, 
				hidden_activation = activationFn,
				use_fast = conv_config['use_fast'],l1_reg = mlp_config['l1_reg'],
				l2_reg = mlp_config['l2_reg'],max_col_norm = mlp_config['max_col_norm'],
				input_dropout_factor=conv_config['input_dropout_factor'])
	else:
		cnn = CNN(numpy_rng,theano_rng,conv_layer_configs = conv_layer_config, batch_size = batch_size,
				n_outs=model_config['n_outs'],hidden_layer_configs=mlp_config, 
				hidden_activation = activationFn,
				use_fast = conv_config['use_fast'],l1_reg = mlp_config['l1_reg'],
				l2_reg = mlp_config['l2_reg'],max_col_norm = mlp_config['max_col_norm'])
				
	########################
	#  LOADING THE MODEL   #
	########################
	try:
		# pretraining
		ptr_file = model_config['input_file']
		pretrained_layers = mlp_config['pretrained_layers']
		logger.info("Loading the pretrained network..")
		cnn.load(filename=ptr_file,max_layer_num = pretrained_layers,  withfinal=True)
	except KeyError as e:
		logger.warning("Pretrained network missing in working directory, skipping model loading")
Example No. 5
	def __init__(self, model_config):
		super(CNN3dPredictor, self).__init__(model_config, 'CNN3d')
		conv_config,conv_layer_config,mlp_config = load_conv_spec(self.model_config['nnet_spec'],
														self.batch_size,
														self.model_config['input_shape'])
		activationFn = parse_activation(mlp_config['activation'])
		
		self.model = CNN3D(self.numpy_rng,self.theano_rng,conv_layer_configs = conv_layer_config, 
			batch_size = self.batch_size, n_outs= self.model_config['n_outs'],
			hidden_layer_configs=mlp_config,hidden_activation = activationFn,
			l1_reg = mlp_config['l1_reg'],l2_reg = mlp_config['l2_reg'],
			max_col_norm = mlp_config['max_col_norm'])
		
		self.__load_model__(self.model_config['input_file'], mlp_config['pretrained_layers'])
Example No. 6
    def __init__(self, model_config):
        super(DNNPredictor, self).__init__(model_config, "DNN")
        mlp_config = load_dnn_spec(self.model_config["nnet_spec"])
        activationFn = parse_activation(mlp_config["activation"])
        n_ins = model_config["n_ins"]
        n_outs = model_config["n_outs"]
        max_col_norm = mlp_config["max_col_norm"]
        l1_reg = mlp_config["l1_reg"]
        l2_reg = mlp_config["l2_reg"]
        adv_activation = mlp_config["adv_activation"]
        hidden_layers_sizes = mlp_config["hidden_layers"]
        do_dropout = mlp_config["do_dropout"]

        if do_dropout:
            dropout_factor = mlp_config["dropout_factor"]
            input_dropout_factor = mlp_config["input_dropout_factor"]
            self.model = DNN_Dropout(
                numpy_rng=self.numpy_rng,
                theano_rng=self.theano_rng,
                n_ins=n_ins,
                hidden_layers_sizes=hidden_layers_sizes,
                n_outs=n_outs,
                activation=activationFn,
                dropout_factor=dropout_factor,
                input_dropout_factor=input_dropout_factor,
                adv_activation=adv_activation,
                max_col_norm=max_col_norm,
                l1_reg=l1_reg,
                l2_reg=l2_reg,
            )
        else:
            self.model = DNN(
                numpy_rng=self.numpy_rng,
                theano_rng=self.theano_rng,
                n_ins=n_ins,
                hidden_layers_sizes=hidden_layers_sizes,
                n_outs=n_outs,
                activation=activationFn,
                adv_activation=adv_activation,
                max_col_norm=max_col_norm,
                l1_reg=l1_reg,
                l2_reg=l2_reg,
            )

        self.__load_model__(self.model_config["input_file"], mlp_config["pretrained_layers"])
Example No. 7
def runCNN3D(arg):
	
	if type(arg) is dict:
		model_config = arg
	else:
		model_config = load_model(arg, 'CNN')
	
	conv_config,conv_layer_config,mlp_config = load_conv_spec(
			model_config['nnet_spec'],
			model_config['batch_size'],
			model_config['input_shape'])
	#__debugPrintData__(conv_layer_config,'convolution config')
	
	data_spec = load_data_spec(model_config['data_spec'], model_config['batch_size'])
	
	numpy_rng = numpy.random.RandomState(model_config['random_seed'])
	theano_rng = RandomStreams(numpy_rng.randint(2 ** 30))
	
	logger.info('> ... building the model')
	hidden_activation = parse_activation(mlp_config['activation'])

	# create the working directory
	createDir(model_config['wdir'])

	batch_size = model_config['batch_size']
	
	cnn = CNN3D(numpy_rng,theano_rng,conv_layer_configs = conv_layer_config, batch_size = batch_size,
			n_outs=model_config['n_outs'],hidden_layer_configs=mlp_config,hidden_activation = hidden_activation,
			l1_reg = mlp_config['l1_reg'],l2_reg = mlp_config['l2_reg'],max_col_norm = mlp_config['max_col_norm'])
	
				
	########################
	#  LOADING THE MODEL   #
	########################
	try:
		# pretraining
		ptr_file = model_config['input_file']
		pretrained_layers = mlp_config['pretrained_layers']
		logger.info("Loading the pretrained network..")
		cnn.load(filename=ptr_file,max_layer_num = pretrained_layers,  withfinal=True)
	except KeyError as e:
		logger.warning("Pretrained network missing in working directory, skipping model loading")
Example No. 8
    def __init__(self, model_config):
        super(DNNPredictor, self).__init__(model_config, 'DNN')
        mlp_config = load_dnn_spec(self.model_config['nnet_spec'])
        activationFn = parse_activation(mlp_config['activation'])
        n_ins = model_config['n_ins']
        n_outs = model_config['n_outs']
        max_col_norm = mlp_config['max_col_norm']
        l1_reg = mlp_config['l1_reg']
        l2_reg = mlp_config['l2_reg']
        adv_activation = mlp_config['adv_activation']
        hidden_layers_sizes = mlp_config['hidden_layers']
        do_dropout = mlp_config['do_dropout']

        if do_dropout:
            dropout_factor = mlp_config['dropout_factor']
            input_dropout_factor = mlp_config['input_dropout_factor']
            self.model = DNN_Dropout(numpy_rng=self.numpy_rng,
                                     theano_rng=self.theano_rng,
                                     n_ins=n_ins,
                                     hidden_layers_sizes=hidden_layers_sizes,
                                     n_outs=n_outs,
                                     activation=activationFn,
                                     dropout_factor=dropout_factor,
                                     input_dropout_factor=input_dropout_factor,
                                     adv_activation=adv_activation,
                                     max_col_norm=max_col_norm,
                                     l1_reg=l1_reg,
                                     l2_reg=l2_reg)
        else:
            self.model = DNN(numpy_rng=self.numpy_rng,
                             theano_rng=self.theano_rng,
                             n_ins=n_ins,
                             hidden_layers_sizes=hidden_layers_sizes,
                             n_outs=n_outs,
                             activation=activationFn,
                             adv_activation=adv_activation,
                             max_col_norm=max_col_norm,
                             l1_reg=l1_reg,
                             l2_reg=l2_reg)

        self.__load_model__(self.model_config['input_file'],
                            mlp_config['pretrained_layers'])
Example No. 9
def runRBM(arg):

    if type(arg) is dict:
        model_config = arg
    else:
        model_config = load_model(arg, 'RBM')

    rbm_config = load_rbm_spec(model_config['nnet_spec'])
    data_spec = load_data_spec(model_config['data_spec'],
                               model_config['batch_size'])

    # initialize the random number generators
    numpy_rng = numpy.random.RandomState(model_config['random_seed'])
    theano_rng = RandomStreams(numpy_rng.randint(2**30))

    activationFn = parse_activation(rbm_config['activation'])

    # create the working directory
    createDir(model_config['wdir'])

    batch_size = model_config['batch_size']
    wdir = model_config['wdir']

    dbn = DBN(numpy_rng=numpy_rng,
              theano_rng=theano_rng,
              n_ins=model_config['n_ins'],
              hidden_layers_sizes=rbm_config['hidden_layers'],
              n_outs=model_config['n_outs'],
              first_layer_gb=rbm_config['first_layer_gb'],
              pretrainedLayers=rbm_config['pretrained_layers'],
              activation=activationFn)

    logger.info("Loading Pretrained network weights")
    try:
        # pretraining
        ptr_file = model_config['input_file']
        dbn.load(filename=ptr_file)
    except KeyError as e:
        logger.info("KeyMissing:" + str(e))
        logger.info(
            "Pretrained network missing in config file: skipping loading")
Example No. 10
	def __init__(self, model_config):
		super(CNNPredictor, self).__init__(model_config, 'CNN')
		conv_config,conv_layer_config,mlp_config = load_conv_spec(self.model_config['nnet_spec'],
														self.batch_size,
														self.model_config['input_shape'])
		activationFn = parse_activation(mlp_config['activation'])
		if mlp_config['do_dropout'] or conv_config['do_dropout']:
			self.model = DropoutCNN(self.numpy_rng,self.theano_rng,conv_layer_configs = conv_layer_config, 
				batch_size = self.batch_size, n_outs=self.model_config['n_outs'],
				hidden_layer_configs=mlp_config, hidden_activation = activationFn,
				use_fast = conv_config['use_fast'],l1_reg = mlp_config['l1_reg'],
				l2_reg = mlp_config['l2_reg'],max_col_norm = mlp_config['max_col_norm'],
				input_dropout_factor=conv_config['input_dropout_factor'])
		else:
			self.model = CNN(self.numpy_rng,self.theano_rng,conv_layer_configs = conv_layer_config,
				batch_size = self.batch_size, n_outs=self.model_config['n_outs'],
				hidden_layer_configs=mlp_config,  hidden_activation = activationFn,
				use_fast = conv_config['use_fast'],l1_reg = mlp_config['l1_reg'],
				l2_reg = mlp_config['l2_reg'],max_col_norm = mlp_config['max_col_norm'])
		
		self.__load_model__(self.model_config['input_file'], mlp_config['pretrained_layers'])
Example No. 11
def runRBM(arg):

    if type(arg) is dict:
        model_config = arg
    else:
        model_config = load_model(arg, 'RBM')

    rbm_config = load_rbm_spec(model_config['nnet_spec'])
    data_spec = load_data_spec(model_config['data_spec'], model_config['batch_size'])


    # initialize the random number generators
    numpy_rng = numpy.random.RandomState(model_config['random_seed'])
    theano_rng = RandomStreams(numpy_rng.randint(2 ** 30))

    activationFn = parse_activation(rbm_config['activation'])

    # create the working directory
    createDir(model_config['wdir'])

    batch_size = model_config['batch_size']
    wdir = model_config['wdir']
    

    dbn = DBN(numpy_rng=numpy_rng, theano_rng = theano_rng, n_ins=model_config['n_ins'],
            hidden_layers_sizes=rbm_config['hidden_layers'],n_outs=model_config['n_outs'],
            first_layer_gb = rbm_config['first_layer_gb'],
            pretrainedLayers=rbm_config['pretrained_layers'],
            activation=activationFn)
    
    logger.info("Loading Pretrained network weights")
    try:
        # pretraining
        ptr_file = model_config['input_file']
        dbn.load(filename=ptr_file)
    except KeyError as e:
        logger.info("KeyMissing:" + str(e))
        logger.info("Pretrained network missing in config file: skipping loading")
Example No. 12
	def __init__(self, numpy_rng, theano_rng, batch_size, n_outs,conv_layer_configs, hidden_layer_configs,
		hidden_activation = T.nnet.sigmoid,l1_reg=None,l2_reg=None,max_col_norm=None):

		super(CNN3D, self).__init__(conv_layer_configs, hidden_layer_configs,l1_reg,l2_reg,max_col_norm)
		if not theano_rng:
			theano_rng = RandomStreams(numpy_rng.randint(2 ** 30))
			            
		for i in xrange(self.conv_layer_num):		# construct the convolution layer
			if i == 0:  				#is_input layer
				input = self.x
			else:
				input = self.layers[-1].output #output of previous layer
			
			config = conv_layer_configs[i]
			conv_activation = parse_activation(config['activation'])
			conv_layer = ConvLayer(numpy_rng=numpy_rng, input=input,input_shape=config['input_shape'],
				filter_shape=config['filter_shape'],poolsize=config['poolsize'],
				activation = conv_activation)
			self.layers.append(conv_layer)
			self.conv_layers.append(conv_layer)
			if config['update']==True:	# only selected convolution layers are updated
				self.params.extend(conv_layer.params)
				self.delta_params.extend(conv_layer.delta_params)

		hidden_layers = hidden_layer_configs['hidden_layers']
		self.conv_output_dim = numpy.prod(config['output_shape'][1:])
		adv_activation_configs = hidden_layer_configs['adv_activation'] 
		
		#flattening the last convolution output layer
		self.features = self.conv_layers[-1].output.flatten(2)
		self.features_dim = self.conv_output_dim
		
		for i in xrange(self.hidden_layer_num):		# construct the hidden layer
			if i == 0:				# first sigmoidal layer
				input_size = self.conv_output_dim
				layer_input = self.features
			else:
				input_size = hidden_layers[i - 1]	# number of hidden neurons in previous layers
				layer_input = self.layers[-1].output
			
			
			if adv_activation_configs is None:
				sigmoid_layer = HiddenLayer(rng=numpy_rng, input=layer_input,n_in=input_size, 
						n_out = hidden_layers[i], activation=hidden_activation)
						
			else:
				sigmoid_layer = HiddenLayer(rng=numpy_rng, input=layer_input,n_in=input_size, 
						n_out = hidden_layers[i]*adv_activation_configs['pool_size'], activation=hidden_activation,
						adv_activation_method = adv_activation_configs['method'],
						pool_size = adv_activation_configs['pool_size'],
						pnorm_order = adv_activation_configs['pnorm_order'])
						
						
			self.layers.append(sigmoid_layer)
			self.mlp_layers.append(sigmoid_layer)

			if config['update']==True:	# only selected hidden layers are updated
				self.params.extend(sigmoid_layer.params)
				self.delta_params.extend(sigmoid_layer.delta_params)
           

		self.logLayer = LogisticRegression(input=self.layers[-1].output,n_in=hidden_layers[-1],n_out=n_outs)
		
		self.layers.append(self.logLayer)
		self.params.extend(self.logLayer.params)
		self.delta_params.extend(self.logLayer.delta_params)
		
		self.finetune_cost = self.logLayer.negative_log_likelihood(self.y)
		self.errors = self.logLayer.errors(self.y)
		self.output = self.logLayer.prediction()
		
		#regularization
		if self.l1_reg is not None:
			self.__l1Regularization__(self.hidden_layer_num*2)
		if self.l2_reg is not None:
			self.__l2Regularization__(self.hidden_layer_num*2)
Example No. 13
	def __init__(self, numpy_rng, theano_rng, batch_size, n_outs,conv_layer_configs, hidden_layer_configs, 
			use_fast=False,hidden_activation = T.nnet.sigmoid,l1_reg=None,l2_reg=None,
			max_col_norm=None,input_dropout_factor=0.0):

		super(DropoutCNN, self).__init__(conv_layer_configs,hidden_layer_configs,l1_reg,l2_reg,max_col_norm)
		self.input_dropout_factor = input_dropout_factor
		
		self.dropout_layers = []
		if not theano_rng:
			theano_rng = RandomStreams(numpy_rng.randint(2 ** 30))
            
		for i in xrange(self.conv_layer_num):		# construct the convolution layer
			if i == 0:  							#is_input layer
				conv_input = self.x
				if self.input_dropout_factor > 0.0:
					dropout_conv_input = _dropout_from_layer(theano_rng, self.x,self.input_dropout_factor)
				else:
					dropout_conv_input = self.x;	
			else:
				conv_input = (1-conv_layer_configs[i-1]['dropout_factor'])*self.layers[-1].output #output of previous layer
				dropout_conv_input = self.dropout_layers[-1].dropout_output;
				
			config = conv_layer_configs[i]
			conv_activation= parse_activation(config['activation'])
			dropout_conv_layer = DropoutConvLayer(numpy_rng=numpy_rng, input=dropout_conv_input,
				input_shape=config['input_shape'],filter_shape=config['filter_shape'],poolsize=config['poolsize'],
				activation = conv_activation, use_fast = use_fast,dropout_factor=conv_layer_configs[i]['dropout_factor'])
			
			conv_layer = ConvLayer(numpy_rng=numpy_rng, input=conv_input,input_shape=config['input_shape'],
				filter_shape=config['filter_shape'],poolsize=config['poolsize'],activation = conv_activation,
				use_fast = use_fast, W = dropout_conv_layer.W, b = dropout_conv_layer.b)
			
				
			self.dropout_layers.append(dropout_conv_layer);
			self.layers.append(conv_layer)
			self.conv_layers.append(conv_layer)
			
			if config['update']==True:	# only selected convolution layers are updated
				self.params.extend(dropout_conv_layer.params)
				self.delta_params.extend(dropout_conv_layer.delta_params)

		hidden_layers = hidden_layer_configs['hidden_layers']
		self.conv_output_dim = config['output_shape'][1] * config['output_shape'][2] * config['output_shape'][3]
		adv_activation_configs = hidden_layer_configs['adv_activation'] 
		
		#flattening the last convolution output layer
		self.dropout_features = self.dropout_layers[-1].dropout_output.flatten(2)
		self.features = self.conv_layers[-1].output.flatten(2)
		self.features_dim = self.conv_output_dim

		self.dropout_layers = []
		self.dropout_factor = hidden_layer_configs['dropout_factor']
		
		for i in xrange(self.hidden_layer_num):		# construct the hidden layer
			if i == 0:								# is first sigmoidal layer
				input_size = self.conv_output_dim
				dropout_layer_input = self.dropout_features
				layer_input = self.features
			else:
				input_size = hidden_layers[i - 1]	# number of hidden neurons in previous layers
				dropout_layer_input = self.dropout_layers[-1].dropout_output			
				layer_input = (1 - self.dropout_factor[i-1]) * self.layers[-1].output
				
			if adv_activation_configs is None:
				dropout_sigmoid_layer = DropoutHiddenLayer(rng=numpy_rng, input=layer_input,n_in=input_size, 
						n_out = hidden_layers[i], activation=hidden_activation,
						dropout_factor = self.dropout_factor[i]);
						
				sigmoid_layer = HiddenLayer(rng=numpy_rng, input=layer_input,n_in=input_size, 
						n_out = hidden_layers[i], activation=hidden_activation,
						W=dropout_sigmoid_layer.W, b=dropout_sigmoid_layer.b);
										
						
			else:
				dropout_sigmoid_layer = DropoutHiddenLayer(rng=numpy_rng, input=layer_input,n_in=input_size, 
						n_out = hidden_layers[i]*adv_activation_configs['pool_size'], activation=hidden_activation,
						adv_activation_method = adv_activation_configs['method'],
						pool_size = adv_activation_configs['pool_size'],
						pnorm_order = adv_activation_configs['pnorm_order'],
						dropout_factor = self.dropout_factor[i]);
						
				sigmoid_layer = HiddenLayer(rng=numpy_rng, input=layer_input,n_in=input_size, 
						n_out = hidden_layers[i]*adv_activation_configs['pool_size'], activation=hidden_activation,
						adv_activation_method = adv_activation_configs['method'],
						pool_size = adv_activation_configs['pool_size'],
						pnorm_order = adv_activation_configs['pnorm_order'],
						W=dropout_sigmoid_layer.W, b=dropout_sigmoid_layer.b);
						
			self.layers.append(sigmoid_layer)
			self.dropout_layers.append(dropout_sigmoid_layer)
			self.mlp_layers.append(sigmoid_layer)

			if config['update']==True:	# only selected hidden layers are updated
				self.params.extend(dropout_sigmoid_layer.params)
				self.delta_params.extend(dropout_sigmoid_layer.delta_params)

		self.dropout_logLayer = LogisticRegression(input=self.dropout_layers[-1].dropout_output,n_in=hidden_layers[-1],n_out=n_outs)
		self.logLayer = LogisticRegression(
							input=(1 - self.dropout_factor[-1]) * self.layers[-1].output,
							n_in=hidden_layers[-1],n_out=n_outs,
							W=self.dropout_logLayer.W, b=self.dropout_logLayer.b)
		
		self.dropout_layers.append(self.dropout_logLayer)
		self.layers.append(self.logLayer)
		self.params.extend(self.dropout_logLayer.params)
		self.delta_params.extend(self.dropout_logLayer.delta_params)
		
		self.finetune_cost = self.dropout_logLayer.negative_log_likelihood(self.y)
		self.errors = self.logLayer.errors(self.y)
		self.output = self.logLayer.prediction()
		
		#regularization
		if self.l1_reg is not None:
			self.__l1Regularization__(self.hidden_layer_num*2)
		if self.l2_reg is not None:
			self.__l2Regularization__(self.hidden_layer_num*2)
Example No. 14
    def __init__(self,
                 numpy_rng,
                 theano_rng,
                 batch_size,
                 n_outs,
                 conv_layer_configs,
                 hidden_layer_configs,
                 hidden_activation=T.nnet.sigmoid,
                 l1_reg=None,
                 l2_reg=None,
                 max_col_norm=None):

        super(CNN3D, self).__init__(conv_layer_configs, hidden_layer_configs,
                                    l1_reg, l2_reg, max_col_norm)
        if not theano_rng:
            theano_rng = RandomStreams(numpy_rng.randint(2**30))

        for i in xrange(
                self.conv_layer_num):  # construct the convolution layer
            if i == 0:  #is_input layer
                input = self.x
            else:
                input = self.layers[-1].output  #output of previous layer

            config = conv_layer_configs[i]
            conv_activation = parse_activation(config['activation'])
            conv_layer = ConvLayer(numpy_rng=numpy_rng,
                                   input=input,
                                   input_shape=config['input_shape'],
                                   filter_shape=config['filter_shape'],
                                   poolsize=config['poolsize'],
                                   activation=conv_activation)
            self.layers.append(conv_layer)
            self.conv_layers.append(conv_layer)
            if config['update'] == True:  # only selected convolution layers are updated
                self.params.extend(conv_layer.params)
                self.delta_params.extend(conv_layer.delta_params)

        hidden_layers = hidden_layer_configs['hidden_layers']
        self.conv_output_dim = numpy.prod(config['output_shape'][1:])
        adv_activation_configs = hidden_layer_configs['adv_activation']

        #flattening the last convolution output layer
        self.features = self.conv_layers[-1].output.flatten(2)
        self.features_dim = self.conv_output_dim

        for i in xrange(self.hidden_layer_num):  # construct the hidden layer
            if i == 0:  # first sigmoidal layer
                input_size = self.conv_output_dim
                layer_input = self.features
            else:
                input_size = hidden_layers[
                    i - 1]  # number of hidden neurons in previous layers
                layer_input = self.layers[-1].output

            if adv_activation_configs is None:
                sigmoid_layer = HiddenLayer(rng=numpy_rng,
                                            input=layer_input,
                                            n_in=input_size,
                                            n_out=hidden_layers[i],
                                            activation=hidden_activation)

            else:
                sigmoid_layer = HiddenLayer(
                    rng=numpy_rng,
                    input=layer_input,
                    n_in=input_size,
                    n_out=hidden_layers[i] *
                    adv_activation_configs['pool_size'],
                    activation=hidden_activation,
                    adv_activation_method=adv_activation_configs['method'],
                    pool_size=adv_activation_configs['pool_size'],
                    pnorm_order=adv_activation_configs['pnorm_order'])

            self.layers.append(sigmoid_layer)
            self.mlp_layers.append(sigmoid_layer)

            if config['update'] == True:  # only selected hidden layers are updated
                self.params.extend(sigmoid_layer.params)
                self.delta_params.extend(sigmoid_layer.delta_params)

        self.logLayer = LogisticRegression(input=self.layers[-1].output,
                                           n_in=hidden_layers[-1],
                                           n_out=n_outs)

        self.layers.append(self.logLayer)
        self.params.extend(self.logLayer.params)
        self.delta_params.extend(self.logLayer.delta_params)

        self.finetune_cost = self.logLayer.negative_log_likelihood(self.y)
        self.errors = self.logLayer.errors(self.y)
        self.output = self.logLayer.prediction()

        #regularization
        if self.l1_reg is not None:
            self.__l1Regularization__(self.hidden_layer_num * 2)
        if self.l2_reg is not None:
            self.__l2Regularization__(self.hidden_layer_num * 2)
Example No. 15
def runCNN(arg):

    if type(arg) is dict:
        model_config = arg
    else:
        model_config = load_model(arg, 'CNN')

    conv_config, conv_layer_config, mlp_config = load_conv_spec(
        model_config['nnet_spec'], model_config['batch_size'],
        model_config['input_shape'])

    data_spec = load_data_spec(model_config['data_spec'],
                               model_config['batch_size'])

    numpy_rng = numpy.random.RandomState(89677)
    theano_rng = RandomStreams(numpy_rng.randint(2**30))

    logger.info('> ... building the model')
    activationFn = parse_activation(mlp_config['activation'])

    # create the working directory
    createDir(model_config['wdir'])

    batch_size = model_config['batch_size']
    if mlp_config['do_dropout'] or conv_config['do_dropout']:
        logger.info('>Initializing dropout cnn model')
        cnn = DropoutCNN(
            numpy_rng,
            theano_rng,
            conv_layer_configs=conv_layer_config,
            batch_size=batch_size,
            n_outs=model_config['n_outs'],
            hidden_layer_configs=mlp_config,
            hidden_activation=activationFn,
            use_fast=conv_config['use_fast'],
            l1_reg=mlp_config['l1_reg'],
            l2_reg=mlp_config['l2_reg'],
            max_col_norm=mlp_config['max_col_norm'],
            input_dropout_factor=conv_config['input_dropout_factor'])
    else:
        cnn = CNN(numpy_rng,
                  theano_rng,
                  conv_layer_configs=conv_layer_config,
                  batch_size=batch_size,
                  n_outs=model_config['n_outs'],
                  hidden_layer_configs=mlp_config,
                  hidden_activation=activationFn,
                  use_fast=conv_config['use_fast'],
                  l1_reg=mlp_config['l1_reg'],
                  l2_reg=mlp_config['l2_reg'],
                  max_col_norm=mlp_config['max_col_norm'])

    ########################
    # Loading  THE MODEL #
    ########################
    try:
        # pretraining
        ptr_file = model_config['input_file']
        pretrained_layers = mlp_config['pretrained_layers']
        logger.info("Loading the pretrained network..")
        cnn.load(filename=ptr_file,
                 max_layer_num=pretrained_layers,
                 withfinal=True)
    except KeyError as e:
        logger.warning(
            "Pretrained network missing in working directory, skipping model loading"
        )
Example No. 16
def runSdA(arg):

    if type(arg) is dict:
        model_config = arg
    else:
        model_config = load_model(arg, 'SDA')
        
    sda_config = load_sda_spec(model_config['nnet_spec'])
    data_spec = load_data_spec(model_config['data_spec'], model_config['batch_size'])

    # numpy random generator
    numpy_rng = numpy.random.RandomState(model_config['random_seed'])
    #theano_rng = RandomStreams(numpy_rng.randint(2 ** 30))

    # get the activation function
    activationFn = parse_activation(sda_config['activation'])

    # create the working directory
    createDir(model_config['wdir'])

    logger.info('building the model')
    # construct the stacked denoising autoencoder class
    sda = SDA(numpy_rng=numpy_rng, n_ins=model_config['n_ins'],
              hidden_layers_sizes=sda_config['hidden_layers'],
              n_outs=model_config['n_outs'],activation=activationFn)

    batch_size = model_config['batch_size']


    #########################
    # PRETRAINING THE MODEL #
    #########################
    if model_config['processes']['pretraining']:
        
        train_sets = read_dataset(data_spec['training'])
        pretraining_config = model_config['pretrain_params']
        corruption_levels = sda_config['corruption_levels']

        preTraining(sda, train_sets, corruption_levels, pretraining_config)
        del train_sets

    ########################
    # FINETUNING THE MODEL #
    ########################
    if model_config['processes']['finetuning']:
        fineTunning(sda,model_config,data_spec)

    ########################
    #  TESTING THE MODEL   #
    ########################
    if model_config['processes']['testing']:
        testing(sda,data_spec)

    ##########################
    ##   Export Features    ##
    ##########################
    if model_config['processes']['export_data']:
        exportFeatures(sda,model_config,data_spec)

    # save the pretrained nnet to file
    logger.info('Saving model to ' + str(model_config['output_file']) + '....')
    sda.save(filename=model_config['output_file'], withfinal=True)
    logger.info('Saved model to ' + str(model_config['output_file']))
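The example above gates each stage (pretraining, finetuning, testing, feature export) on boolean flags under model_config['processes']. A minimal config sketch follows; every key, path, and value is an assumption for illustration, not this project's documented schema, which is defined by load_model and load_sda_spec.

# Hypothetical config sketch; the real key set comes from this project's loaders.
model_config = {
    'nnet_spec': 'config/sda.cfg',        # hypothetical SDA spec file
    'data_spec': 'config/data.cfg',       # hypothetical data spec file
    'wdir': 'exp/sda',                    # working directory to be created
    'batch_size': 128,
    'n_ins': 440,
    'n_outs': 1024,
    'random_seed': 89677,
    'output_file': 'exp/sda_final',       # where sda.save writes the model
    'pretrain_params': {},                # schema defined by this project's preTraining
    'processes': {                        # switch individual stages on or off
        'pretraining': True,
        'finetuning': True,
        'testing': False,
        'export_data': False,
    },
}
runSdA(model_config)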