Example #1
Train a two-layer MLP (a PRELU hidden layer followed by a Softmax output) on MNIST with pynet, logging the run's hyperparameters to a database.
# NOTE: the import paths below are assumed from pynet's package layout
# (only pynet.model and pynet.layer are confirmed by the other example);
# adjust them to match your checkout.
from pynet.model import MLP
from pynet.layer import PRELU, Softmax
from pynet.datasets.mnist import Mnist
from pynet.weight_init import GaussianWeight, XavierWeight
from pynet.learning_method import AdaGrad
from pynet.learning_rule import LearningRule
from pynet.cost import Cost
from pynet.log import Log
from pynet.train_object import TrainObject


def mlp():

    # build dataset
    data = Mnist(preprocessor=None, train_valid_test_ratio=[5, 1, 1])

    # build mlp
    mlp = MLP(input_dim=data.feature_size())

    W1 = GaussianWeight(prev_dim=mlp.input_dim, this_dim=1000)
    hidden1 = PRELU(dim=1000,
                    name='h1_layer',
                    W=W1(mean=0, std=0.1),
                    b=None,
                    dropout_below=None)

    mlp.add_layer(hidden1)

    W2 = XavierWeight(prev_dim=hidden1.dim, this_dim=data.target_size())
    output = Softmax(dim=data.target_size(),
                     name='output_layer',
                     W=W2(),
                     b=None,
                     dropout_below=None)

    mlp.add_layer(output)

    # build learning method
    learning_method = AdaGrad(learning_rate=0.1, momentum=0.9)

    # set the learning rules
    learning_rule = LearningRule(max_col_norm=10,
                                 L1_lambda=None,
                                 L2_lambda=None,
                                 training_cost=Cost(type='mse'),
                                 learning_rate_decay_factor=None,
                                 stopping_criteria={
                                     'max_epoch': 300,
                                     'epoch_look_back': 10,
                                     'cost': Cost(type='error'),
                                     'percent_decrease': 0.01
                                 })

    # (optional) build the logging object
    log = Log(experiment_name='mnist',
              description='This is a tutorial example',
              save_outputs=True,
              save_learning_rule=True,
              save_model=True,
              save_epoch_error=True,
              save_to_database={
                  'name': 'Example.db',
                  'records': {
                      'Dataset': data.__class__.__name__,
                      'max_col_norm': learning_rule.max_col_norm,
                      'Weight_Init_Seed': mlp.rand_seed,
                      'Dropout_Below': str([layer.dropout_below for layer in mlp.layers]),
                      'Batch_Size': data.batch_size,
                      'Layer_Dim': str([layer.dim for layer in mlp.layers]),
                      'Layer_Types': str([layer.__class__.__name__ for layer in mlp.layers]),
                      'Preprocessor': data.preprocessor.__class__.__name__,
                      'Learning_Rate': learning_method.learning_rate,
                      'Momentum': learning_method.momentum,
                      'Training_Cost': learning_rule.cost.type,
                      'Stopping_Cost': learning_rule.stopping_criteria['cost'].type
                  }
              })  # end log

    # put everything into the train object
    train_object = TrainObject(model=mlp,
                               dataset=data,
                               learning_rule=learning_rule,
                               learning_method=learning_method,
                               log=log)
    # finally run the code
    train_object.run()
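
The weight initializers above follow a two-step pattern: construct the object with the layer dimensions, then call it to materialize the matrix, as in W1(mean=0, std=0.1) and W2(). A minimal numpy sketch of that pattern, illustrative only and not pynet's actual implementation:

import numpy as np

class GaussianWeightSketch(object):
    """Illustrates the construct-then-call pattern of pynet's weight classes."""
    def __init__(self, prev_dim, this_dim):
        self.shape = (prev_dim, this_dim)

    def __call__(self, mean=0, std=0.1):
        # draw every entry from N(mean, std**2)
        return np.random.normal(mean, std, size=self.shape)

class XavierWeightSketch(object):
    def __init__(self, prev_dim, this_dim):
        self.prev_dim = prev_dim
        self.this_dim = this_dim

    def __call__(self):
        # Glorot/Xavier uniform bound: sqrt(6 / (fan_in + fan_out))
        bound = np.sqrt(6.0 / (self.prev_dim + self.this_dim))
        return np.random.uniform(-bound, bound, size=(self.prev_dim, self.this_dim))

# usage mirrors the tutorial: bind dimensions first, sample on call
W1 = GaussianWeightSketch(prev_dim=784, this_dim=1000)(mean=0, std=0.1)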
Example #2
Convert a GPU-pickled pynet model into a CPU-pickled one by copying each layer's weights into CPU-side shared variables.
from __future__ import print_function

import argparse
import cPickle

import numpy as np
import theano
import theano.tensor as T

from pynet.model import MLP
import pynet.layer as layers

floatX = theano.config.floatX

parser = argparse.ArgumentParser(description='''Convert gpu pickle pynet model to cpu pickle pynet model''')
parser.add_argument('--gpu_model', metavar='Path', required=True, help='the path to the gpu model pickle file')
parser.add_argument('--cpu_model', metavar='Path', required=True, help='''path to save the cpu model pickle file''')
args = parser.parse_args()

print('loading gpu mlp..')
fin = open(args.gpu_model, 'rb')
gpu_model = cPickle.load(fin)

mlp = MLP(input_dim=gpu_model.input_dim)
for layer in gpu_model.layers:
    # Copy each weight out of GPU memory into a plain numpy array, then wrap
    # it in a CPU (TensorType) shared variable.
    layerW = T._shared(np.array(layer.W.get_value(), floatX),
                       name=layer.W.name, borrow=False)
    layerb = T._shared(np.array(layer.b.get_value(), floatX),
                       name=layer.b.name, borrow=False)
    # Rebuild the same layer class with the CPU-backed parameters.
    mlp_layer = getattr(layers, layer.__class__.__name__)(dim=layer.dim, name=layer.name,
                                                          W=layerW, b=layerb)
    mlp.add_layer(mlp_layer)
    print('mlp layer', mlp_layer.name, mlp_layer.dim)
print('layers', mlp.layers)

fout = open(args.cpu_model, 'wb')
cPickle.dump(mlp, fout)
fout.close()
fin.close()
print('Done!')
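
After converting, it is worth a quick sanity check that the CPU pickle loads without a GPU context and that the layer shapes survived the round trip. Assuming the script above is saved as gpu_model_to_cpu.py (hypothetical name) and was run as: python gpu_model_to_cpu.py --gpu_model model_gpu.pkl --cpu_model model_cpu.pkl, a minimal check looks like this (the paths are hypothetical; use whatever you passed on the command line):

from __future__ import print_function
import cPickle

# 'model_cpu.pkl' is a hypothetical path; use whatever was passed as --cpu_model
with open('model_cpu.pkl', 'rb') as f:
    cpu_model = cPickle.load(f)

print('input_dim:', cpu_model.input_dim)
for layer in cpu_model.layers:
    # get_value() on a CPU shared variable returns a plain numpy ndarray
    print(layer.name, layer.dim, layer.W.get_value().shape)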