Exemplo n.º 1
0
    def train(self, X, Y, num_iterations=1000, learning_rate=0.01, batch_size=50):
        """Train the supervised model with SGD on a labelled design matrix.

        X -- 2-D input design matrix, one example per row
        Y -- 2-D target matrix; columns must match the last entry of
             self.config['layers']
        num_iterations -- number of epochs (EpochCounter termination)
        learning_rate  -- SGD learning rate
        batch_size     -- minibatch size

        Trains self.model in place and checkpoints to 'tmp.pkl'.
        """
        # Validate that the data dimensions agree with the configured model
        # before spending time on training.
        assert Y.shape[1] == self.config['layers'][-1], 'output dimensions do not match.'
        assert X.shape[1] == self.model.layers[0].get_weights().shape[0], 'input dimensions do not match.'

        # Record the hyperparameters actually used for this run.
        self.config.update({'iterations': num_iterations,
                            'learning_rate': learning_rate,
                            'batch_size': batch_size})

        algorithm = pylearn2.training_algorithms.sgd.SGD(
            learning_rate=self.config['learning_rate'],
            batch_size=self.config['batch_size'],
            # One pass over the data per epoch (floor division drops the
            # trailing partial batch).
            batches_per_iter=X.shape[0] // self.config['batch_size'],
            cost=pylearn2.costs.mlp.Default(),
            termination_criterion=pylearn2.termination_criteria.EpochCounter(
                self.config['iterations']))

        data_train = pylearn2.datasets.DenseDesignMatrix(X=X, y=Y)

        train = pylearn2.train.Train(
            dataset=data_train,
            model=self.model,
            algorithm=algorithm,
            save_path='tmp.pkl',
            save_freq=100)

        train.main_loop()
Exemplo n.º 2
0
    def train(self, X, num_iterations=1000, learning_rate=0.01, batch_size=50):
        """Train the autoencoder model with SGD on an unlabelled design matrix.

        X -- 2-D input design matrix, one example per row
        num_iterations -- number of epochs (EpochCounter termination)
        learning_rate  -- initial SGD learning rate (decayed by OneOverEpoch)
        batch_size     -- minibatch size

        Trains self.model in place and checkpoints to 'tmp.pkl'.
        """
        # Record the hyperparameters actually used for this run.
        self.config.update({'iterations': num_iterations,
                            'learning_rate': learning_rate,
                            'batch_size': batch_size})

        algorithm = pylearn2.training_algorithms.sgd.SGD(
            learning_rate=self.config['learning_rate'],
            batch_size=self.config['batch_size'],
            # Floor division: one pass over the data per epoch, dropping the
            # trailing partial batch.
            batches_per_iter=X.shape[0] // self.config['batch_size'],
            cost=pylearn2.costs.autoencoder.MeanSquaredReconstructionError(),
            termination_criterion=pylearn2.termination_criteria.EpochCounter(
                self.config['iterations']))

        data_train = pylearn2.datasets.DenseDesignMatrix(X=X[:, :])

        train = pylearn2.train.Train(
            dataset=data_train,
            model=self.model,
            algorithm=algorithm,
            save_path='tmp.pkl',
            save_freq=100,
            # Guard with max(1, ...) so tiny iteration counts never produce a
            # zero epoch argument for the 1/t decay schedule.
            extensions=[pylearn2.training_algorithms.sgd.OneOverEpoch(
                max(1, self.config['iterations'] // 2),
                max(1, self.config['iterations'] // 8))])

        train.main_loop()
Exemplo n.º 3
0
def compute_objective(args):
    '''Initialize a neural network with the given arguments, train it, and
    return the final validation KL loss as the hyperopt objective.'''
    # Train network.
    train = init_train(args)
    train.main_loop()

    # Return objective to hyperopt: last recorded value of the validation
    # KL-divergence monitoring channel.
    loss = train.model.monitor.channels['valid_y_kl'].val_record[-1]
    return loss
Exemplo n.º 4
0
def compute_objective(args):
    '''Initialize a neural network with the given arguments, train it, and
    return the final validation KL loss as the hyperopt objective.'''
    # Train network.
    train = init_train(args)
    train.main_loop()

    # Return objective to hyperopt: last recorded value of the validation
    # KL-divergence monitoring channel.
    loss = train.model.monitor.channels['valid_y_kl'].val_record[-1]
    return loss
Exemplo n.º 5
0
def main(argv, freeze):
  """Load a YAML training spec and a pretrained model, replace the model's
  last two layers with a fresh 1x1 ConvElemwise + sigmoid head, and resume
  training.

  argv   -- command-line arguments: [yaml_path, model_path]
  freeze -- when truthy, freeze the parameters of all layers except the two
            being replaced
  """
  try:
    opts, args = getopt.getopt(argv, '')
    yaml = args[0]
    model = args[1]
  except getopt.GetoptError:
    usage()
    sys.exit(2)

  # Load the training spec; serial.load_train_file opens the path itself,
  # so no separate file handle is needed.
  train = serial.load_train_file(yaml)

  # Load pretrained model with bad sigmoid output.
  # Pickles are binary data: read in 'rb' mode.
  with open(model, 'rb') as fo:
    model = pkl.load(fo)

  # Optionally freeze everything except the two layers we are replacing.
  if freeze:
    for i in range(0, len(model.layers) - 2):
      model.freeze(model.layers[i].get_params())

  ### Replace the penultimate layer with a 1x1 identity conv elemwise
  layer = ConvElemwise(layer_name='out',
                       output_channels=1,
                       kernel_shape=(1, 1),
                       irange=0.05,
                       nonlinearity=IdentityConvNonlinearity(),
                       max_kernel_norm=7.9,
                       tied_b=1)
  layer.set_mlp(model)
  layer.set_input_space(model.layers[-3].get_output_space())
  model.layers[-2] = layer

  ### Replace the output layer with a real sigmoid
  layer = SigmoidExtended(layer_name='y', n_classes=1)
  layer.set_mlp(model)
  layer.set_input_space(model.layers[-2].get_output_space())
  model.layers[-1] = layer

  # Carry the old monitor along under the name "old" so training resumes
  # with monitoring history intact.
  train.model = push_monitor(model, "old")
  print(train.model)

  train.main_loop()
Exemplo n.º 6
0
def main(argv, freeze):
    """Load a YAML training spec and a pretrained model, replace the model's
    last two layers with a fresh 1x1 ConvElemwise + sigmoid head, and resume
    training.

    argv   -- command-line arguments: [yaml_path, model_path]
    freeze -- when truthy, freeze the parameters of all layers except the two
              being replaced
    """
    try:
        opts, args = getopt.getopt(argv, '')
        yaml = args[0]
        model = args[1]
    except getopt.GetoptError:
        usage()
        sys.exit(2)

    # Load the training spec; serial.load_train_file opens the path itself,
    # so no separate file handle is needed.
    train = serial.load_train_file(yaml)

    # Load pretrained model with bad sigmoid output.
    # Pickles are binary data: read in 'rb' mode.
    with open(model, 'rb') as fo:
        model = pkl.load(fo)

    # Optionally freeze everything except the two layers we are replacing.
    if freeze:
        for i in range(0, len(model.layers) - 2):
            model.freeze(model.layers[i].get_params())

    ### Replace the penultimate layer with a 1x1 identity conv elemwise
    layer = ConvElemwise(layer_name='out',
                         output_channels=1,
                         kernel_shape=(1, 1),
                         irange=0.05,
                         nonlinearity=IdentityConvNonlinearity(),
                         max_kernel_norm=7.9,
                         tied_b=1)
    layer.set_mlp(model)
    layer.set_input_space(model.layers[-3].get_output_space())
    model.layers[-2] = layer

    ### Replace the output layer with a real sigmoid
    layer = SigmoidExtended(layer_name='y', n_classes=1)
    layer.set_mlp(model)
    layer.set_input_space(model.layers[-2].get_output_space())
    model.layers[-1] = layer

    # Carry the old monitor along under the name "old" so training resumes
    # with monitoring history intact.
    train.model = push_monitor(model, "old")
    print(train.model)

    train.main_loop()
Exemplo n.º 7
0
    def train(self, X, num_iterations=1000, learning_rate=0.01, batch_size=50):
        """Train the model with SGD on an unlabelled design matrix.

        X -- 2-D input design matrix, one example per row
        num_iterations -- number of epochs
        learning_rate  -- initial learning rate (decayed by OneOverEpoch)
        batch_size     -- minibatch size

        Delegates algorithm construction to self._build_algorithm, trains
        self.model in place, and checkpoints to 'tmp.pkl'.
        """
        # Record the hyperparameters actually used for this run.
        self.config.update({'iterations': num_iterations,
                            'learning_rate': learning_rate,
                            'batch_size': batch_size})

        data_train = pylearn2.datasets.DenseDesignMatrix(X=X[:, :])
        self._build_algorithm(X.shape[0])

        train = pylearn2.train.Train(
            dataset=data_train,
            model=self.model,
            algorithm=self.algorithm,
            save_path='tmp.pkl',
            save_freq=100,
            # max(1, ...) keeps the 1/t decay schedule valid for tiny
            # iteration counts.
            extensions=[pylearn2.training_algorithms.sgd.OneOverEpoch(
                max(1, self.config['iterations'] // 2),
                max(1, self.config['iterations'] // 8))])

        train.main_loop()
Exemplo n.º 8
0
def Compute_Objective(args, conf, **kwargs):
    '''Initializes neural network with specified arguments and
       configuration and trains.

    Returns the last value of the 'valid_objective' monitoring channel as
    the hyperopt objective, and best-effort saves a training-progress plot
    and the raw loss curve under $DNN_PATH/figure/objective/.
    '''
    # init_train/main_loop may redirect stdout/stderr; save the originals
    # so they can be restored afterwards.
    old_stdout = sys.stdout
    old_stderr = sys.stderr
    dev_path = os.environ['DNN_PATH']

    # Build a unique model name from the hyperparameters and configuration.
    modelname = '%s_%d_%d_%d_%0.14f_%0.14f_%0.6f_%d_%0.6f' % tuple(args[:-1])
    modelname = '%s_%s_%d' % (str(conf['ptype']), modelname,
                              int(conf['batch_size']))

    # Train network.
    train = init_train(args, conf, **kwargs)
    train.main_loop()
    model = train.model

    sys.stdout = old_stdout
    sys.stderr = old_stderr

    # Full history of the validation objective channel.
    loss = train.model.monitor.channels['valid_objective'].val_record

    # Plotting/saving is best-effort: a display or I/O failure must never
    # kill the optimization run. Catch Exception (not bare except) so
    # KeyboardInterrupt/SystemExit still propagate.
    try:
        fig = plt.figure()
        fig.suptitle('Progress of Training', fontsize=20)
        plt.xlabel('Number of Epochs')
        plt.ylabel('Value from Loss-function')
        plt.plot(loss)
        plt.plot(loss, 'b.')
        plt.savefig('%s/figure/objective/%s.pdf' % (dev_path, modelname))
        plt.close('all')
        np.savetxt('%s/figure/objective/%s.dat' % (dev_path, modelname), loss)
    except Exception:
        pass

    # Return objective to hyperopt.
    return loss[-1]
Exemplo n.º 9
0
                                                   input_include_probs={'hidden_0':1., 'hidden_1':1., 'hidden_2':1., 'hidden_3':1., 'y':0.5},
                                                   input_scales={ 'hidden_0': 1., 'hidden_1':1., 'hidden_2':1., 'hidden_3':1., 'y':2.}),
                                               
                                               update_callbacks=pylearn2.training_algorithms.sgd.ExponentialDecay(
                                                   decay_factor=1.0000003, # Decreases by this factor every batch. (1/(1.000001^8000)^100 
                                                   min_lr=.000001
                                               ));

## EXTENSION

# Ramp momentum from its initial value up to .99 between epoch 0 and
# momentum_saturate (statement terminators are unnecessary in Python).
extensions = [pylearn2.training_algorithms.learning_rule.MomentumAdjustor(
    start=0,
    saturate=momentum_saturate,
    final_momentum=.99)]

## TRAINING MODEL

train = pylearn2.train.Train(dataset=train_set,
                             model=model,
                             algorithm=algorithm,
                             save_path=save_path,
                             save_freq=100)

debug = False
# Log file sits next to the checkpoint, same basename with a .log suffix.
logfile = os.path.splitext(train.save_path)[0] + '.log'
print('Using=%s' % theano.config.device)  # Can use gpus.
print('Writing to %s' % logfile)
print('Writing to %s' % train.save_path)
# Redirect all subsequent training output into the log file.
sys.stdout = open(logfile, 'w')

train.main_loop()